{
"resetEverything": true,
"user": {
"id": "",
"firstName": "",
"lastName": "",
"lastWebsiteVisit": "",
"isSubscribed": false,
"domainMembershipMap": {},
"fbUserId": "",
"email": "",
"isAdmin": false,
"emailFrequency": "",
"emailThreshold": 0,
"ignoreMathjax": false,
"showAdvancedEditorMode": false,
"isSlackMember": false,
"analyticsId": "aid:e9HO+5vkvHshq2Pr+lyPYVRJ8WbMLUv5+qMDClS3yDs",
"hasReceivedMaintenanceUpdates": false,
"hasReceivedNotifications": false,
"newNotificationCount": 0,
"newAchievementCount": 0,
"maintenanceUpdateCount": 0,
"invitesClaimed": [],
"mailchimpInterests": {},
"continueBayesPath": null,
"continueLogPath": null
},
"pages": {
"2": {
"likeableId": "938",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 13,
"dislikeCount": 0,
"likeScore": 13,
"individualLikes": [],
"pageId": "2",
"edit": 4,
"editSummary": "",
"prevEdit": 3,
"currentEdit": 4,
"wasPublished": true,
"type": "group",
"title": "Eliezer Yudkowsky",
"clickbait": "Cofounder, with Nick Bostrom, of the field of value alignment theory.",
"textLength": 512,
"alias": "EliezerYudkowsky",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2015-12-19 01:46:45",
"pageCreatorId": "2",
"pageCreatedAt": "2015-09-04 16:14:58",
"seeDomainId": "0",
"editDomainId": "2",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 5,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 1816,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"198": {
"likeableId": "266",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "198",
"edit": 4,
"editSummary": "",
"prevEdit": 3,
"currentEdit": 4,
"wasPublished": true,
"type": "wiki",
"title": "Team Arbital",
"clickbait": "The people behind Arbital",
"textLength": 184,
"alias": "TeamArbital",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1",
"editCreatedAt": "2016-06-17 16:55:46",
"pageCreatorId": "1",
"pageCreatedAt": "2015-12-13 23:14:48",
"seeDomainId": "0",
"editDomainId": "8",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 906,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"227": {
"likeableId": "1005",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "227",
"edit": 11,
"editSummary": "",
"prevEdit": 10,
"currentEdit": 11,
"wasPublished": true,
"type": "wiki",
"title": "Strictly confused",
"clickbait": "A hypothesis is strictly confused by the raw data, if the hypothesis did much worse in predicting it than the hypothesis itself expected.",
"textLength": 8722,
"alias": "strictly_confused",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "32",
"editCreatedAt": "2016-07-04 04:08:40",
"pageCreatorId": "2",
"pageCreatedAt": "2016-02-21 04:48:53",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 2,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 419,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"505": {
"likeableId": "2923",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 4,
"dislikeCount": 0,
"likeScore": 4,
"individualLikes": [],
"pageId": "505",
"edit": 14,
"editSummary": "",
"prevEdit": 13,
"currentEdit": 14,
"wasPublished": true,
"type": "wiki",
"title": "Report likelihoods not p-values: FAQ",
"clickbait": "",
"textLength": 35017,
"alias": "likelihood_not_pvalue_faq",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2cl",
"editCreatedAt": "2017-09-09 16:57:58",
"pageCreatorId": "32",
"pageCreatedAt": "2016-07-04 05:30:47",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 746,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"693": {
"likeableId": "3554",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "693",
"edit": 15,
"editSummary": "",
"prevEdit": 14,
"currentEdit": 15,
"wasPublished": true,
"type": "wiki",
"title": "High-speed intro to Bayes's rule",
"clickbait": "A high-speed introduction to Bayes's Rule on one page, for the impatient and mathematically adept.",
"textLength": 23382,
"alias": "bayes_rule_fast_intro",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2017-12-25 00:39:11",
"pageCreatorId": "2",
"pageCreatedAt": "2016-09-29 04:37:11",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 1,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 8426,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"909": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "909",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "comment",
"title": "\"To be sure. Does this mean that the claim\n\"*We ...\"",
"clickbait": "",
"textLength": 173,
"alias": "909",
"externalUrl": "",
"sortChildrenBy": "recentFirst",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "8zq",
"editCreatedAt": "2018-02-15 17:42:25",
"pageCreatorId": "8zq",
"pageCreatedAt": "2018-02-15 17:42:25",
"seeDomainId": "0",
"editDomainId": "3017",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": false,
"isResolved": false,
"snapshotText": "",
"anchorContext": "Bayesian: I wrote pretty much the same Python program when I was first converting to Bayesianism and finding out about likelihood ratios and feeling skeptical about the system maybe being abusable in some way, and then a friend of mine found out about likelihood ratios and he wrote essentially the same program, also in Python\\. And lo, he found that false evidence of 20:1 for the coin being 55% biased was found at least once, somewhere along the way\\.\\.\\. 1\\.4% of the time\\. If you asked for more extreme likelihood ratios, the chances of finding them dropped off even faster\\.",
"anchorText": "he found that false evidence of 20:1 for the coin being 55% biased was found at least once, somewhere along the way\\.\\.\\. 1\\.4% of the time",
"anchorOffset": 340,
"mergedInto": "",
"isDeleted": false,
"viewCount": 1903,
"text": "To be sure. Does this mean that the claim\n\"*We have observed 20 times against 1 that the coin is 55% biased*\" \nis only made 1.4% of the time?\n\nIf so, it seems like a lot... ",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 1,
"maintainerCount": 1,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [
"4xx"
],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"14z": {
"likeableId": "139",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "14z",
"edit": 7,
"editSummary": "updating",
"prevEdit": 6,
"currentEdit": 7,
"wasPublished": true,
"type": "wiki",
"title": "Arbital domain",
"clickbait": "What is a domain? Why is it important?",
"textLength": 1145,
"alias": "arbital_domain",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-07-21 23:31:57",
"pageCreatorId": "1",
"pageCreatedAt": "2015-10-10 22:31:00",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 82,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1bv": {
"likeableId": "312",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1bv",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "wiki",
"title": "Probability theory",
"clickbait": "The logic of science; coherence relations on quantitative degrees of belief.",
"textLength": 79,
"alias": "probability_theory",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2015-12-18 22:16:55",
"pageCreatorId": "2",
"pageCreatedAt": "2015-12-18 22:16:55",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 618,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1lw": {
"likeableId": "559",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1lw",
"edit": 5,
"editSummary": "added links",
"prevEdit": 4,
"currentEdit": 5,
"wasPublished": true,
"type": "wiki",
"title": "Mathematics",
"clickbait": "Mathematics is the study of numbers and other ideal objects that can be described by axioms.",
"textLength": 745,
"alias": "math",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-06-22 17:49:03",
"pageCreatorId": "2",
"pageCreatedAt": "2016-01-15 03:02:51",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 2008,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1lz": {
"likeableId": "562",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1lz",
"edit": 54,
"editSummary": "",
"prevEdit": 53,
"currentEdit": 54,
"wasPublished": true,
"type": "wiki",
"title": "Bayes' rule",
"clickbait": "Bayes' rule is the core theorem of probability theory saying how to revise our beliefs when we make a new observation.",
"textLength": 6132,
"alias": "bayes_rule",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2017-02-21 23:15:37",
"pageCreatorId": "2",
"pageCreatedAt": "2016-01-15 04:12:00",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": true,
"todoCount": 1,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 116608,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1r8": {
"likeableId": "694",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 3,
"dislikeCount": 0,
"likeScore": 3,
"individualLikes": [],
"pageId": "1r8",
"edit": 21,
"editSummary": "",
"prevEdit": 20,
"currentEdit": 21,
"wasPublished": true,
"type": "wiki",
"title": "Bayesian reasoning",
"clickbait": "A probability-theory-based view of the world; a coherent way of changing probabilistic beliefs based on evidence.",
"textLength": 752,
"alias": "bayes_reasoning",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1",
"editCreatedAt": "2016-07-26 16:28:17",
"pageCreatorId": "2",
"pageCreatedAt": "2016-01-26 03:14:44",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 871,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1rf": {
"likeableId": "699",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 4,
"dislikeCount": 0,
"likeScore": 4,
"individualLikes": [],
"pageId": "1rf",
"edit": 11,
"editSummary": "",
"prevEdit": 10,
"currentEdit": 11,
"wasPublished": true,
"type": "wiki",
"title": "Probability",
"clickbait": "The degree to which someone believes something, measured on a scale from 0 to 1, allowing us to do math to it.",
"textLength": 4788,
"alias": "probability",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-08-26 11:14:18",
"pageCreatorId": "2",
"pageCreatedAt": "2016-01-26 22:05:40",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 1,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 691,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1rj": {
"likeableId": "702",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1rj",
"edit": 25,
"editSummary": "",
"prevEdit": 24,
"currentEdit": 25,
"wasPublished": true,
"type": "wiki",
"title": "Conditional probability",
"clickbait": "The notation for writing \"The probability that someone has green eyes, if we know that they have red hair.\"",
"textLength": 6468,
"alias": "conditional_probability",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2016-10-08 02:05:05",
"pageCreatorId": "2",
"pageCreatedAt": "2016-01-26 23:06:38",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 4972,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1rp": {
"likeableId": "707",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1rp",
"edit": 11,
"editSummary": "removed spurious comma",
"prevEdit": 10,
"currentEdit": 11,
"wasPublished": true,
"type": "wiki",
"title": "Posterior probability",
"clickbait": "What we believe, after seeing the evidence and doing a Bayesian update.",
"textLength": 870,
"alias": "posterior_probability",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "5",
"editCreatedAt": "2016-07-10 07:08:40",
"pageCreatorId": "2",
"pageCreatedAt": "2016-01-27 05:32:22",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 227,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1rq": {
"likeableId": "708",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1rq",
"edit": 19,
"editSummary": "formatting",
"prevEdit": 18,
"currentEdit": 19,
"wasPublished": true,
"type": "wiki",
"title": "Relative likelihood",
"clickbait": "How relatively likely an observation is, given two or more hypotheses, determines the strength and direction of evidence.",
"textLength": 3195,
"alias": "relative_likelihood",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-08-04 14:00:09",
"pageCreatorId": "2",
"pageCreatedAt": "2016-01-27 06:47:22",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 1,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 605,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1x5": {
"likeableId": "848",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 8,
"dislikeCount": 0,
"likeScore": 8,
"individualLikes": [],
"pageId": "1x5",
"edit": 27,
"editSummary": "",
"prevEdit": 26,
"currentEdit": 27,
"wasPublished": true,
"type": "wiki",
"title": "Bayes' rule: Odds form",
"clickbait": "The simplest and most easily understandable form of Bayes' rule uses relative odds.",
"textLength": 6053,
"alias": "bayes_rule_odds",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2016-10-13 00:56:37",
"pageCreatorId": "2",
"pageCreatedAt": "2016-02-08 01:43:10",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 29553,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1zg": {
"likeableId": "921",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1zg",
"edit": 29,
"editSummary": "",
"prevEdit": 28,
"currentEdit": 29,
"wasPublished": true,
"type": "wiki",
"title": "Bayes' rule: Vector form",
"clickbait": "For when you want to apply Bayes' rule to lots of evidence and lots of variables, all in one go. (This is more or less how spam filters work.)",
"textLength": 11401,
"alias": "bayes_rule_multiple",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "8d0",
"editCreatedAt": "2017-05-23 00:31:28",
"pageCreatorId": "2",
"pageCreatedAt": "2016-02-13 19:54:09",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 5702,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1zj": {
"likeableId": "923",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1zj",
"edit": 23,
"editSummary": "Replacing an \"of course\" with a more explicit explanation.",
"prevEdit": 22,
"currentEdit": 23,
"wasPublished": true,
"type": "wiki",
"title": "Bayes' rule: Functional form",
"clickbait": "Bayes' rule for continuous variables.",
"textLength": 5363,
"alias": "bayes_rule_functional",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "5",
"editCreatedAt": "2016-10-11 01:21:34",
"pageCreatorId": "2",
"pageCreatedAt": "2016-02-13 20:51:58",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 2,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 1892,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"1zq": {
"likeableId": "929",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "1zq",
"edit": 119,
"editSummary": "",
"prevEdit": 118,
"currentEdit": 119,
"wasPublished": true,
"type": "wiki",
"title": "Bayes' rule: Guide",
"clickbait": "The Arbital guide to Bayes' rule",
"textLength": 3820,
"alias": "bayes_rule_guide",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1",
"editCreatedAt": "2016-10-25 23:43:14",
"pageCreatorId": "2",
"pageCreatedAt": "2016-02-14 00:00:33",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": true,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 113056,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"2v": {
"likeableId": "1760",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "2v",
"edit": 27,
"editSummary": "",
"prevEdit": 26,
"currentEdit": 27,
"wasPublished": true,
"type": "wiki",
"title": "AI alignment",
"clickbait": "The great civilizational problem of creating artificially intelligent computer systems such that running them is a good idea.",
"textLength": 5071,
"alias": "ai_alignment",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2017-01-27 20:32:06",
"pageCreatorId": "2",
"pageCreatedAt": "2015-03-26 23:12:18",
"seeDomainId": "0",
"editDomainId": "2",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 3,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 3481,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"3d": {
"likeableId": "2273",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "3d",
"edit": 33,
"editSummary": "",
"prevEdit": 32,
"currentEdit": 33,
"wasPublished": true,
"type": "wiki",
"title": "Arbital",
"clickbait": "Arbital is the place for crowdsourced, intuitive math explanations.",
"textLength": 5201,
"alias": "Arbital",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1",
"editCreatedAt": "2016-08-08 16:07:52",
"pageCreatorId": "1",
"pageCreatedAt": "2015-03-30 22:19:47",
"seeDomainId": "0",
"editDomainId": "8",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 2320,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"3hs": {
"likeableId": "2499",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "3hs",
"edit": 19,
"editSummary": "added link to exemplar pages",
"prevEdit": 18,
"currentEdit": 19,
"wasPublished": true,
"type": "wiki",
"title": "Author's guide to Arbital",
"clickbait": "How to write intuitive, flexible content on Arbital.",
"textLength": 4420,
"alias": "author_guide_to_arbital",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-08-08 14:32:40",
"pageCreatorId": "1",
"pageCreatedAt": "2016-05-10 17:55:35",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 310,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"4v": {
"likeableId": "2318",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 2,
"dislikeCount": 0,
"likeScore": 2,
"individualLikes": [],
"pageId": "4v",
"edit": 6,
"editSummary": "",
"prevEdit": 5,
"currentEdit": 6,
"wasPublished": true,
"type": "wiki",
"title": "Work in progress",
"clickbait": "This page is being actively worked on by an editor. Check with them before making major changes.",
"textLength": 131,
"alias": "work_in_progress_meta_tag",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-07-05 22:48:12",
"pageCreatorId": "2",
"pageCreatedAt": "2015-04-17 01:27:41",
"seeDomainId": "0",
"editDomainId": "8",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 1,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 71,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"4vr": {
"likeableId": "2877",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 3,
"dislikeCount": 0,
"likeScore": 3,
"individualLikes": [],
"pageId": "4vr",
"edit": 15,
"editSummary": "",
"prevEdit": 13,
"currentEdit": 15,
"wasPublished": true,
"type": "wiki",
"title": "Subjective probability",
"clickbait": "Probability is in the mind, not in the environment. If you don't know whether a coin came up heads or tails, that's a fact about you, not a fact about the coin.",
"textLength": 4199,
"alias": "subjective_probability",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2017-02-08 18:36:49",
"pageCreatorId": "2",
"pageCreatedAt": "2016-06-28 02:49:01",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 1,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 315,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"4xx": {
"likeableId": "2882",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 13,
"dislikeCount": 0,
"likeScore": 13,
"individualLikes": [
"2",
"2vh",
"32",
"414",
"4c1",
"4tg",
"5lh",
"5wf",
"8c0",
"8s6",
"8wr",
"8xq",
"9h5"
],
"pageId": "4xx",
"edit": 43,
"editSummary": "",
"prevEdit": 42,
"currentEdit": 43,
"wasPublished": true,
"type": "wiki",
"title": "Likelihood functions, p-values, and the replication crisis",
"clickbait": "What's the whole Bayesian-vs.-frequentist debate about?",
"textLength": 48649,
"alias": "likelihood_vs_pvalue",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2018-07-02 01:10:21",
"pageCreatorId": "2",
"pageCreatedAt": "2016-06-30 03:01:39",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 3044,
"text": "[summary: \nOr, __Why Switching From Reporting p-values to Reporting Likelihood Functions Might Help Fix the Replication Crisis: A personal view by Eliezer Yudkowsky.__\n\nShort version: Report [56t likelihoods], not p-values.]\n\n__Or: Switching From Reporting p-values to Reporting Likelihood Functions Might Help Fix the Replication Crisis: A personal view by Eliezer Yudkowsky.__\n\n_Disclaimers:_\n\n- _This dialogue was written by a [1r8 Bayesian]. The voice of the Scientist in the dialogue below may fail to pass the [Ideological Turing Test](https://en.wikipedia.org/wiki/Ideological_Turing_Test) for frequentism, that is, it may fail to do justice to frequentist arguments and counterarguments._\n- _It does not seem sociologically realistic, to the author, that the proposal below could be adopted by the scientific community at large within the next 10 years. It seemed worth writing down nevertheless._\n\n_If you don't already know Bayes' rule, check out Arbital's [1zq Guide to Bayes' Rule] if confused._\n\n----------\n\n**Moderator:** Hello, everyone. I'm here today with the **Scientist,** a working experimentalist in... chemical psychology, or something; with the **Bayesian,** who's going to explain why, on their view, we can make progress on the replication crisis by replacing p-values with some sort of Bayesian thing--\n\n**Undergrad:** Sorry, can you repeat that?\n\n**Moderator:** And finally, the **Confused Undergrad** on my right. **Bayesian,** would you care to start by explaining the rough idea?\n\n**Bayesian:** Well, the rough idea is something like this. Suppose we flip a possibly-unfair coin six times, and observe the sequence HHHHHT. Should we be suspicious that the coin is biased?\n\n**Scientist:** No.\n\n**Bayesian:** This isn't a literal coin. Let's say we present a series of experimental subjects with two cookies on a plate, one with green sprinkles and one with red sprinkles. 
The first five people took cookies with green sprinkles and the sixth person took a cookie with red sprinkles. %note: And they all saw separate plates, on a table in the waiting room marked \"please take only one\" so nobody knew what was being tested, and none of them saw the others' cookie choices.% Do we think most people prefer green-sprinkled cookies or do we think it was just random?\n\n**Undergrad:** I think I would be *suspicious* that maybe people liked green sprinkles better. Or at least that the sort of people who go to the university and get used as test subjects like green sprinkles better. Yes, even if I just saw that happen in the first six cases. But I'm guessing I'm going to get dumped-on for that.\n\n**Scientist:** I think I would be genuinely not-yet-suspicious. There's just too much stuff that looks good after N=6 that doesn't pan out with N=60.\n\n**Bayesian:** I'd at least strongly suspect that people in the test population *don't* mostly prefer red sprinkles. But the reason I introduced this example is as an oversimplified example of how current scientific statistics calculate so-called \"p-values\", and what a Bayesian sees as the central problem with that.\n\n**Scientist:** And we can't use a more realistic example with 30 subjects?\n\n**Bayesian:** That would not be nice to the Confused Undergrad.\n\n**Undergrad:** *Seconded.*\n\n**Bayesian:** So: Heads, heads, heads, heads, heads, tails. I ask: is this \"statistically significant\", as current conventional statisticians would have the phrase?\n\n**Scientist:** I reply: no. 
On the null hypothesis that the coin is fair, or analogously that people have no strong preference between green and red sprinkles, we should expect to see a result as extreme as this in 14 out of 64 cases.\n\n**Undergrad:** Okay, just to make sure I have this straight: That's because we're considering results like HHHTHH or TTTTTT to be equally or more extreme, and there are 14 total possible cases like that, and we flipped the coin 6 times which gives us $2^6 = 64$ possible results. 14/64 = 22%, which is not less than 5%, so this is not statistically significant at the $p<0.05$ level.\n\n**Scientist:** That's right. However, I'd also like to observe as a matter of practice that even if you get HHHHHH on your first six flips, I don't advise stopping there and sending in a paper where you claim that the coin is biased towards heads.\n\n**Bayesian:** Because if you can decide to *stop* flipping the coin at a time of your choice, then we have to ask \"How likely is it that you can find some place to stop flipping the coin where it looks like there's a significant number of heads?\" That's a whole different kettle of fish according to the p-value concept.\n\n**Scientist:** I was just thinking that N=6 is not a good number of experimental subjects when it comes to testing cookie preferences. But yes, that too.\n\n**Undergrad:** Uh... why does it make a difference if I can decide when to stop flipping the coin?\n\n**Bayesian:** What an excellent question.\n\n**Scientist:** Well, this is where the concept of p-values is less straightforward than plugging the numbers into a statistics package and believing whatever the stats package says. If you previously decided to flip exactly six coins, and then stop, regardless of what results you got, then you would get a result as extreme as \"HHHHHH\" or \"TTTTTT\" 2/64 of the time, or 3.1%, so p<0.05. 
However, suppose that instead you are a bad fraudulent scientist, or maybe just an ignorant undergraduate who doesn't realize what they're doing wrong. Instead of picking the number of flips in advance, you keep flipping the coin until the statistics software package tells you that you got a result that *would have been* statistically significant *if,* contrary to the actual facts of the case, you'd decided *in advance* to flip the coin exactly that many times. But you didn't decide in advance to flip the coin that many times. You decided it after checking the actual results. Which you are not allowed to do.\n\n**Undergrad:** I've heard that before, but I'm not sure I understand on a gut level why it's bad for me to decide when I've collected enough data.\n\n**Scientist:** What we're trying to do here is set up a test that the null hypothesis cannot pass--to make sure that where there's no fire, there's unlikely to be smoke. We want a complete experimental process which is unlikely to generate a \"statistically significant\" discovery if there's no real phenomenon being investigated. If you flip the coin exactly six times, if you decide that in advance, then you are less than 5% likely to get a result as extreme as \"six heads\" or \"six tails\". If you flip the coin repeatedly, and check *repeatedly* for a result that *would* have had p<0.05 if you'd decided in advance to flip the coin exactly that many times, your chance of getting a nod from the statistics package is *much greater* than 5%. You're carrying out a process which is much more than 5% likely to yell \"smoke\" in the absence of fire.\n\n**Bayesian:** The way I like to explain the problem is like this: Suppose you flip a coin six times and get HHHHHT. If you, in the secret depths of your mind, in your inner heart that no other mortal knows, decided to flip the coin exactly six times and then stop, this result is not statistically significant; p=0.22. 
If you decided in the secret depths of your heart, that you were going to *keep flipping the coin until it came up tails,* then the result HHHHHT *is* statistically significant with p=0.03, because the chance of a fair coin requiring you to wait six or more flips to get one tail is 1/32.\n\n**Undergrad:** What.\n\n**Scientist:** It's a bit of a parody, obviously--nobody would really decide to flip a coin until they got one tail, then stop--but the Bayesian is technically correct about how the rules for p-values work. What we're asking is how rare our outcome is, within the class of outcomes we *could* have gotten. The person who keeps flipping the coin until they get one tail has possible outcomes {T, HT, HHT, HHHT, HHHHT, HHHHHT, HHHHHHT...} and so on. The class of outcomes where you get to the sixth round or later is the class of outcomes {HHHHHT, HHHHHHT...} and so on, a set of outcomes which collectively have a probability of 1/64 + 1/128 + 1/256... = 1/32. Whereas if you flip the coin exactly six times, your class of possible outcomes is {TTTTTT, TTTTTH, TTTTHT, TTTTHH...}, a set of 64 possibilities within which the outcome HHHHHT is something we could lump in with {HHHHTH, HHHTHH, THTTTT...} and so on. So although it's counterintuitive, if we really had decided to run the first experiment, HHHHHT would be a statistically significant result that a fair coin would be unlikely to give us. And if we'd decided to run the second experiment, HHHHHT would not be a statistically significant result because fair coins sometimes do something *like* that.\n\n**Bayesian:** And it doesn't bother you that the meaning of your experiment depends on your private state of mind?\n\n**Scientist:** It's an honor system. 
Just like the process doesn't work if you lie about which coinflips you actually saw, the process doesn't work--is not a fair test in which non-fires are unlikely to generate smoke--if you lie about *which experiment you performed.* You must honestly say the rules you followed to flip the coin the way you did. Unfortunately, since what you were thinking about the experiment is less clearly visible than the actual coinflips, people are much more likely to twist *how they selected* their number of experimental subjects, or how they selected which tests to run on the data, than they are to tell a blatant lie about what the data said. That's p-hacking. There are, unfortunately, much subtler and less obvious ways of generating smoke without fire than claiming post facto to have followed the rule of flipping the coin until it came up tails. It's a serious problem, and underpins some large part of the great replication crisis, nobody's sure exactly how much.\n\n**Undergrad:** That... sorta makes sense, maybe? I'm guessing this is one of those cases where I have to work through a lot of example problems before it becomes obvious.\n\n**Bayesian:** No.\n\n**Undergrad:** No?\n\n**Bayesian:** You were right the first time, Undergrad. If what the experimentalist is *thinking* has no causal impact on the coin, then the experimentalist's thoughts cannot possibly make any difference to what the coin is saying to us about Nature. My dear Undergrad, you are being taught weird, ad-hoc, overcomplicated rules that aren't even internally consistent--rules that theoretically output *different* wrong answers depending on your private state of mind! And *that* is a problem that runs far deeper into the replication crisis than people misreporting their inner thoughts.\n\n**Scientist:** A bold claim to say the least. 
But don't be coy; tell us what you think we should be doing instead?\n\n**Bayesian:** I analyze as follows: The exact result HHHHHT has a 1/64 or roughly 1.6% probability of being produced by a fair coin flipped six times. To simplify matters, suppose we for some reason were already pondering the hypothesis that the coin was biased to produce 5/6 heads--again, this is an unrealistic example we can de-simplify later. Then this hypothetical biased coin would have a $(5/6)^5 \\cdot (1/6)^1 \\approx 6.7\\%$ probability of producing HHHHHT. So between our two hypotheses \"The coin is fair\" and \"The coin is biased to produce 5/6ths heads\", our exact experimental result is *4.3 times more likely* to be observed in the second case. HHHHHT is also 0.01% likely to be produced if the coin is biased to produce 1/6th heads and 5/6ths tails, so we've already seen some quite strong evidence *against* that particular hypothesis, if anyone was considering it. The exact experimental outcome HHHHHT is 146 times as likely to be produced by a fair coin as by a coin biased to produce 1/6th heads. %note: Recall the Bayesian's earlier thought that, after seeing five subjects select green cookies followed by one subject selecting a red cookie, we'd already picked up strong evidence *against* the proposition: \"Subjects in this experimental population lopsidedly prefer red cookies over green cookies.\"%\n\n**Undergrad:** Well, I think I can follow the calculation you just did, but I'm not clear on what that calculation means.\n\n**Bayesian:** I'll try to explain the meaning shortly, but first, note this. That calculation we just did has *no dependency whatsoever* on *why* you flipped the coin six times. You could have stopped after six because you thought you'd seen enough coinflips. You could have done an extra coinflip after the first five because Namagiri Thayar spoke to you in a dream. The coin doesn't care. The coin isn't affected. 
It remains true that the exact result HHHHHT is 23% as likely to be produced by a fair coin as by a biased coin that comes up heads 5/6ths of the time.\n\n**Scientist:** I agree that this is an interesting property of the calculation that you just did. And then what?\n\n**Bayesian:** You report the results in a journal. Preferably including the raw data so that others can calculate the likelihoods for any other hypotheses of interest. Say somebody else suddenly becomes interested in the hypothesis \"The coin is biased to produce 9/10ths heads.\" Seeing HHHHHT is 5.9% likely in that case, so 88% as likely as if the coin is biased to produce 5/6ths heads (making the data 6.7% likely), or 3.7 times as likely as if the coin is fair (making the data 1.6% likely). But you shouldn't have to think of all possible hypotheses in advance. Just report the raw data so that others can calculate whatever likelihoods they need. Since this calculation deals with the *exact* results we got, rather than summarizing it into some class or set of supposedly similar results, it puts a greater emphasis on reporting your exact experimental data to others.\n\n**Scientist:** Reporting raw data seems an important leg of a good strategy for fighting the replication crisis, on this we agree. I nonetheless don't understand what experimentalists are supposed to *do* with this \"X is Q times as likely as Y\" stuff.\n\n**Undergrad:** Seconded.\n\n**Bayesian:** Okay, so... this isn't trivial to describe without making you run through [1zq a whole introduction to Bayes' rule]--\n\n**Undergrad:** Great. Just what I need, another weird complicated 4-credit course on statistics.\n\n**Bayesian:** It's literally [693 a 1-hour read if you're good at math]. It just isn't literally *trivial* to understand with no prior introduction. 
Well, even with no introduction whatsoever, I may be able to fake it with statements that will *sound* like they might be reasonable--and the reasoning *is* valid, it just might not be obvious that it is. Anyway. It is a theorem of probability that the following is valid reasoning:\n\n*(the Bayesian takes a breath)*\n\n**Bayesian:** Suppose that Professor Plum and Miss Scarlet are two suspects in a murder. Based on their prior criminal convictions, we start out thinking that Professor Plum is twice as likely to have committed the murder as Miss Scarlet. We then discover that the victim was poisoned. We think that, assuming he committed a murder, Professor Plum would be 10% likely to use poison; assuming Miss Scarlet committed a murder, she would be 60% likely to use poison. So Professor Plum is around *one-sixth as likely* to use poison as Miss Scarlet. Then after observing the victim was poisoned, we should update to think Plum is around one-third as likely to have committed the murder as Scarlet: $2 \\times \\frac{1}{6} = \\frac{1}{3}.$\n\n**Undergrad:** Just to check, what do you mean by saying that \"Professor Plum is one-third as likely to have committed the murder as Miss Scarlet\"?\n\n**Bayesian:** I mean that if these two people are our only suspects, we think Professor Plum has a 1/4 probability of having committed the murder and Miss Scarlet has a 3/4 probability of being guilty. So Professor Plum's probability of guilt is one-third that of Miss Scarlet's.\n\n**Scientist:** Now *I'd* like to know what you mean by saying that Professor Plum had a 1/4 probability of committing the murder. Either Plum committed the murder or he didn't; we can't observe the murder be committed multiple times and Professor Plum doing it 1/4th of the time.\n\n**Bayesian:** Are we going there? I guess we're going there. My good Scientist, I mean that if you offered me either side of an even-money bet on whether Plum committed the murder, I'd bet that he didn't do it. 
But if you offered me a gamble that costs \\$1 if Professor Plum is innocent and pays out \\$5 if he's guilty, I'd cheerfully accept that gamble. We only ran the 2012 US Presidential Election one time, but that doesn't mean that on November 7th you should've refused a \\$10 bet that paid out \\$1000 if Obama won. In general when prediction markets and large liquid betting pools put 60% betting odds on somebody winning the presidency, that outcome tends to happen 60% of the time; they are *well-calibrated* for probabilities in that range. If they were systematically uncalibrated--if in general things happened 80% of the time when prediction markets said 60%--you could use that fact to pump money out of prediction markets. And your pumping out that money would adjust the prediction-market prices until they were well-calibrated. If things to which prediction markets assign 70% probability happen around 7 times out of 10, why insist for reasons of ideological purity that the probability statement is meaningless?\n\n**Undergrad:** I admit, that *sounds* to me like it makes sense, if it's not just the illusion of understanding due to my failing to grasp some far deeper debate.\n\n**Bayesian:** There is indeed a [4y9 deeper debate], but what the deeper debate works out to is that your illusion of understanding is pretty much accurate as illusions go.\n\n**Scientist:** Yeah, I'm going to want to come back to that issue later. What if there are two agents who both seem 'well-calibrated' as you put it, but one agent says 60% and the other agent says 70%?\n\n**Bayesian:** If I flip a coin and don't look at it, so that I don't know yet if it came up heads or tails, then my ignorance about the coin isn't a fact about the coin, it's a fact about me. Ignorance exists in the mind, not in the environment. A blank map does not correspond to a blank territory. 
If you peek at the coin and I don't, it's perfectly reasonable for the two of us to occupy different states of uncertainty about the coin. And given that I'm not absolutely certain, I can and should quantify my uncertainty using probabilities. There's like [7ry 300 different theorems] showing that I'll get into trouble if my state of subjective uncertainty *cannot* be viewed as a coherent probability distribution. You kinda pick up on the trend after just the fourth time you see a slightly different clever proof that any violation of the standard probability axioms will cause the graves to vomit forth their dead, the seas to turn red as blood, and the skies to rain down dominated strategies and combinations of bets that produce certain losses--\n\n**Scientist:** Sorry, I shouldn't have said anything just then. Let's come back to this later? I'd rather hear first what you think we should do with the likelihoods once we have them.\n\n**Bayesian:** On the laws of probability theory, those likelihood functions *are* the evidence. They are the objects that send our prior odds of 2 : 1 for Plum vs. Scarlet to posterior odds of 1 : 3 for Plum vs. Scarlet. For any two hypotheses you care to name, if you tell me the relative likelihoods of the data given those hypotheses, I know how to update my beliefs. If you change your beliefs in any other fashion, the skies shall rain dominated strategies etcetera. Bayes' theorem: It's not just a statistical method, it's the LAW.\n\n**Undergrad:** I'm sorry, I still don't understand. Let's say we do an experiment and find data that's 6 times as likely if Professor Plum killed Mr. Boddy than if Miss Scarlet did. Do we arrest Professor Plum?\n\n**Scientist:** My guess is that you're supposed to make up a 'prior probability' that sounds vaguely plausible, like 'a priori, I think Professor Plum is 20% likely to have killed Mr. Boddy'. Then you combine that with your 6 : 1 likelihood ratio to get 3 : 2 posterior odds that Plum killed Mr. 
Boddy. So your paper reports that you've established a 60% posterior probability that Professor Plum is guilty, and the legal process does whatever it does with that.\n\n**Bayesian:** *No.* Dear God, no! Is that really what people think Bayesianism is?\n\n**Scientist:** It's not? I did always hear that the strength of Bayesianism is that it gives us posterior probabilities, which p-values don't actually do, and that the big weakness was that it got there by making up prior probabilities more or less out of thin air, which means that nobody will ever be able to agree on what the posteriors are.\n\n**Bayesian:** Science papers should report *likelihoods.* Or rather, they should report the raw data and helpfully calculate some likelihood functions on it. Not posteriors, never posteriors. \n\n**Undergrad:** What's a posterior? I'm trusting both of you to avoid the obvious joke here.\n\n**Bayesian:** A [1rp posterior probability] is when you say, \"There's a 60% probability that Professor Plum killed Mr. Boddy.\" Which, as the Scientist points out, is something you never get from p-values. It's also something that, in my own opinion, should never be reported in an experimental paper, because it's not *the result of an experiment.*\n\n**Undergrad:** But... okay, Scientist, I'm asking you this one. Suppose we see data statistically significant at p<0.01, something we're less than 1% probable to see if the null hypothesis \"Professor Plum didn't kill Mr. Boddy\" is true. Do we arrest him?\n\n**Scientist:** First of all, that's not a realistic null hypothesis. A null hypothesis is something like \"Nobody killed Mr. 
Boddy\" or \"All suspects are equally guilty.\" But even if what you just said made sense, even if we could reject Professor Plum's innocence at p<0.01, you still can't say anything like, \"It is 99% probable that Professor Plum is guilty.\" That is just not what p-values mean.\n\n**Undergrad:** Then what *do* p-values mean?\n\n**Scientist:** They mean we saw data inside a preselected class of possible results, which class, as a whole, is less than 1% likely to be produced if the null hypothesis is true. That's *all* that it means. You can't go from there to \"Professor Plum is 99% likely to be guilty,\" for reasons the Bayesian is probably better at explaining. You can't go from there to anywhere that's someplace else. What you heard is what there is.\n\n**Undergrad:** Now I'm doubly confused. I don't understand what we're supposed to do with p-values *or* likelihood ratios. What kind of experiment does it take to throw Professor Plum in prison?\n\n**Scientist:** Well, realistically, if you get a couple more experiments at different labs also saying p<0.01, Professor Plum *is* probably guilty.\n\n**Bayesian:** And the 'replication crisis' is that it turns out he's *not* guilty.\n\n**Scientist:** Pretty much.\n\n**Undergrad:** That's not exactly reassuring.\n\n**Scientist:** Experimental science is not for the weak of nerve.\n\n**Undergrad:** So... Bayesian, are you about to say similarly that once you get an extreme enough likelihood ratio, say, anything over 100 to 1, or something, you can probably take something as true?\n\n**Bayesian:** No, it's a bit more complicated than that. Let's say I flip a coin 20 times and get HHHTHHHTHTHTTHHHTHHHTTHT. Well, the hypothesis \"This coin was rigged to produce exactly HHHTHHHTHTHTTHHHTHHHTTHT\" has a likelihood advantage of roughly a million-to-one over the hypothesis \"this is a fair coin\". 
On any reasonable system, unless you wrote down that single hypothesis in advance and handed it to me in an envelope and didn't write down any other hypotheses or hand out any other envelopes, we'd say the hypothesis \"This coin was rigged to produce HHHTHHHTHTHTTHHHTHHHTTHT\" has a complexity penalty of at *least* $2^{20} : 1$ because it takes 20 bits just to describe what the coin is rigged to do. In other words, the penalty to prior plausibility more than cancels out a million-to-one likelihood advantage. And that's just the start of the issues. But, *with* that said, I think there's a pretty good chance you could do okay out of just winging it, once you understood in an intuitive and common-sense way how Bayes' rule worked. If there's evidence pointing to Professor Plum with a likelihood of 1,000 : 1 over any other suspects you can think of, in a field that probably only contained six suspects to begin with, you can figure that the prior odds against Plum weren't much more extreme than 10 : 1 and that you can legitimately be at least 99% sure now.\n\n**Scientist:** But you say that this is *not* something you should report in the paper.\n\n**Bayesian:** That's right. How can I put this... one of the great commandments of Bayesianism is that you ought to take into account *all* the relevant evidence you have available; you can't exclude some evidence from your calculation just because you don't like it. Besides sounding like common sense, this is also a rule you have to follow to prevent your calculations from coming up with paradoxical results, and there are various particular problems where there's a seemingly crazy conclusion and the answer is, \"Well, you *also* need to condition on blah blah blah.\" My point being, how do I, as an experimentalist, know what *all the relevant evidence* is? Who am I to calculate a posterior? 
Maybe somebody else published a paper that includes more evidence, with more likelihoods to be taken into account, and I haven't heard about it yet, but somebody else has. I just contribute my own data and its likelihood function--that's all! It's not my place to claim that I've collected *all* the relevant evidence and can now calculate posterior odds, and even if I could, somebody else could publish another paper a week later and the posterior odds would change again.\n\n**Undergrad:** So, roughly your answer is, \"An experimentalist just publishes the paper and calculates the likelihood thingies for that dataset, and then somebody outside has to figure out what to do with the likelihood thingies.\"\n\n**Bayesian:** Somebody outside has to set up priors--probably just reasonable-sounding ignorance priors, maximum entropy stuff or complexity-based penalties or whatever--then try to make sure they've collected all the evidence, apply the likelihood functions, check to see if the result [227 makes sense], etcetera. And then they might have to revise that estimate if somebody publishes a new paper a week later--\n\n**Undergrad:** That sounds *awful.*\n\n**Bayesian:** It would be awful if we were doing meta-analyses of p-values. Bayesian updates are a *hell* of a lot simpler! Like, you literally just [1zg multiply] the old posterior by the new likelihood [1zj function] and normalize. 
If experiment 1 has a likelihood ratio of 4 for hypothesis A over hypothesis B, and experiment 2 has a likelihood ratio of 9 for A over B, the two experiments together have a likelihood ratio of 36.\n\n**Undergrad:** And you can't do that with p-values, I mean, a p-value of 0.05 and a p-value of 0.01 don't multiply out to p<0.0005--\n\n**Scientist:** *No.*\n\n**Bayesian:** I should like to take this moment to call attention to my superior smile.\n\n**Scientist:** I am still worried about the part of this process where somebody gets to make up prior probabilities.\n\n**Bayesian:** Look, that just corresponds to the part of the process where somebody decides that, having seen 1 discovery and 2 replications with p<0.01, they are willing to buy the new pill or whatever.\n\n**Scientist:** So your reply there is, \"It's subjective, but so is what you do when you make decisions based on having seen some experiments with p-values.\" Hm. I was going to say something like, \"If I set up a rule that says I want data with p<0.001, there's no further objectivity beyond that,\" but I guess you'd say that my asking for p<0.001 instead of p<0.0001 corresponds to my pulling a prior out of my butt?\n\n**Bayesian**: Well, except that asking for a particular p-value is not actually as good as pulling a prior out of your butt. One of the first of those 300 theorems proving *doom* if you violate probability axioms, was Abraham Wald's \"complete class theorem\" in 1947. Wald set out to investigate all the possible *admissible strategies,* where a strategy is a way of acting differently based on whatever observations you make, and different actions get different payoffs in different possible worlds. Wald termed an *admissible strategy* a strategy which was not dominated by some other strategy across all possible measures you could put on the possible worlds. 
Wald found that the class of admissible strategies was simply the class that corresponded to having a probability distribution, doing Bayesian updating on observations, and maximizing expected payoff.\n\n**Undergrad:** Can you perhaps repeat that in slightly smaller words?\n\n**Bayesian:** If you want to do different things depending on what you observe, and get different payoffs depending on what the real facts are, *either* your strategy can be seen as having a probability distribution and doing Bayesian updating, *or* there's another strategy that does better given at least some possible measures on the worlds and never does worse. So if you say anything as wild as \"I'm waiting to see data with p<0.0001 to ban smoking,\" in principle there must be some way of saying something along the lines of, \"I have a prior probability of 0.01% that smoking causes cancer, let's see those likelihood functions\" which does at least as well or better no matter what anyone else would say as their own prior probabilities over the background facts.\n\n**Scientist:** Huh.\n\n**Bayesian:** Indeed. And that was when the Bayesian revolution very slowly started; it's sort of been gathering steam since then. It's worth noting that Wald only proved his theorem a couple of decades after \"p-values\" were invented, which, from my perspective, helps explain how science got wedged into its peculiar current system.\n\n**Scientist:** So you think we should burn all p-values and switch to reporting all likelihood ratios all the time.\n\n**Bayesian:** In a word... yes.\n\n**Scientist:** I'm suspicious, in general, of one-size-fits-all solutions like that. I suspect you--I hope this is not too horribly offensive--I suspect you of idealism. In my experience, different people need different tools from the toolbox at different times, and it's not wise to throw out all the tools in your toolbox except one.\n\n**Bayesian:** Well, let's be clear where I am and amn't idealistic, then. 
Likelihood functions cannot solve the entire replication crisis. There are aspects of this that can't be solved by using better statistics. Open access journals aren't something that hinge on p-values versus likelihood functions. The broken system of peer commentary, presently in the form of peer review, is not something likelihood functions can solve.\n\n**Scientist:** But likelihood functions will solve everything else?\n\n**Bayesian:** No, but they'll at least *help* on a surprising amount. Let me count the ways:\n\n**Bayesian:** One. Likelihood functions don't distinguish between 'statistically significant' results and 'failed' replications. There are no 'positive' and 'negative' results. What used to be called the null hypothesis is now just another hypothesis, with nothing special about it. If you flip a coin and get HHTHTTTHHH, you have not \"failed to reject the null hypothesis with p<0.05\" or \"failed to replicate\". You have found experimental data that favors the fair-coin hypothesis over the 5/6ths-heads hypothesis with a likelihood ratio of 3.78 : 1. This may help to fight the file-drawer effect--not entirely, because there is a mindset in the journals of 'positive' results and biased coins being more exciting than fair coins, and we need to tackle that mindset directly. But the p-value system *encourages* that bad mindset. That's why p-hacking even exists. So switching to likelihoods won't fix everything right away, but it *sure will help.*\n\n**Bayesian:** Two. The system of likelihoods makes the importance of raw data clearer and will encourage a system of publishing the raw data whenever possible, because Bayesian analyses center around the probability of the *exact* data we saw, given our various hypotheses. The p-value system encourages you to think in terms of the data as being just one member of a class of 'equally extreme' results. There's a mindset here of people hoarding their precious data, which is not purely a matter of statistics. 
But the p-value system *encourages* that mindset by encouraging people to think of their result as part of some undistinguished class of 'equally or more extreme' values or whatever, and that its meaning is entirely contained in it being a 'positive' result that is 'statistically significant'.\n\n**Bayesian:** Three. The probability-theoretic view, or Bayesian view, makes it clear that different effect sizes are different hypotheses, as they must be, because they assign different probabilities to the exact observations we see. If one experiment finds a 'statistically significant' effect size of 0.4 and another experiment finds a 'statistically significant' effect size of 0.1 on whatever scale we're working in, the experiment *has not replicated* and we do not yet know what real state of affairs is generating our observations. This directly fights and negates the 'amazing shrinking effect size' phenomenon that is part of the replication crisis.\n\n**Bayesian:** Four. Working in likelihood functions makes it far easier to aggregate our data. It even helps to [227 point up] when our data is being produced under inconsistent conditions or when the true hypothesis is not being considered, because in this case we will find likelihood functions that end up being nearly zero everywhere, or where the best available hypothesis is achieving a much lower likelihood on the combined data than that hypothesis [227 expects itself to achieve]. It is a stricter concept of replication that helps quickly point up when different experiments are being performed under different conditions and yielding results incompatible with a single consistent phenomenon.\n\n**Bayesian:** Five. Likelihood functions are objective facts about the data which do not depend on your state of mind. You cannot deceive somebody by reporting likelihood functions unless you are literally lying about the data or omitting data. 
There's no equivalent of 'p-hacking'.\n\n**Scientist:** Okay, that last claim in particular strikes me as *very* suspicious. What happens if I want to persuade you that a coin is biased towards heads, so I keep flipping it until I randomly get to a point where there's a predominance of heads, and then choose to stop?\n\n**Bayesian:** \"Shrug,\" I say. You can't mislead me by telling me what a real coin actually did.\n\n**Scientist:** I'm asking you what happens if I keep flipping the coin, checking the likelihood each time, until I see that the current statistics favor my pet theory, and then I stop.\n\n**Bayesian:** As a pure idealist seduced by the seductively pure idealism of probability theory, I say that so long as you present me with the true data, all I can and should do is update in the way Bayes' theorem says I should.\n\n**Scientist:** Seriously.\n\n**Bayesian:** I am serious.\n\n**Scientist:** So it doesn't bother you if I keep checking the likelihood ratio and continuing to flip the coin until I can convince you of anything I want.\n\n**Bayesian:** Go ahead and try it.\n\n**Scientist:** What I'm actually going to do is write a Python program which simulates flipping a fair coin *up to* 300 times, and I'm going to see how many times I can get a 20:1 likelihood ratio falsely indicating that the coin is biased to come up heads 55% of the time... why are you smiling?\n\n**Bayesian:** I wrote pretty much the same Python program when I was first converting to Bayesianism and finding out about likelihood ratios and feeling skeptical about the system maybe being abusable in some way, and then a friend of mine found out about likelihood ratios and *he* wrote [essentially the same program, also in Python](https://gist.github.com/Soares/941bdb13233fd0838f1882d148c9ac14). And lo, he found that false evidence of 20:1 for the coin being 55% biased was found at least once, somewhere along the way... 1.4% of the time. 
If you asked for more extreme likelihood ratios, the chances of finding them dropped off even faster.\n\n**Scientist:** Okay, that's not bad by the p-value way of looking at things. But what if there's some more clever way of biasing it?\n\n**Bayesian:** When I was... I must have been five years old, or maybe even younger, and first learning about addition, one of the earliest childhood memories I have at all, is of adding 3 to 5 by counting 5, 6, 7 and believing that you could get different results from adding numbers depending on exactly how you did it. Which is cute, yes, and also indicates a kind of exploring, of probing, that was no doubt important in my starting to understand addition. But you still look back and find it humorous, because now you're a big grownup and you know you can't do that. My writing Python programs to try to find clever ways to fool myself by repeatedly checking the likelihood ratios was the same, in the sense that after I matured a bit more as a Bayesian, I realized that the feat I'd written those programs to try to do was *obviously* impossible. In the same way that trying to find a clever way to break apart the 3 into 2 and 1, and trying to add them separately to 5, and then trying to add the 1 and then the 2, in hopes you can get to 7 or 9 instead of 8, is just never ever going to work. The results in arithmetic are *theorems,* and it doesn't matter in what clever order you switch things up, you are never going to get anything except 8 when you carry out an operation that is validly equivalent to adding 3 plus 5. The theorems of probability theory are also theorems. If your Python program had actually worked, it would have produced a contradiction in probability theory, and thereby a contradiction in Peano Arithmetic, which provides a model for probability theory carried out using rational numbers. 
The thing you tried to do is *exactly* as hard as adding 3 and 5 using the standard arithmetic axioms and getting 7.\n\n**Undergrad:** Uh, why?\n\n**Scientist:** Seconded.\n\n**Bayesian:** Because letting $e$ denote the evidence, $H$ denote the hypothesis, $\\neg$ denote the negation of a proposition, $\\mathbb P(X)$ denote the probability of proposition $X$, and $\\mathbb P(X \\mid Y)$ denote the [-1rj] of $X$ assuming $Y$ to be true, it is a theorem of probability that $$\\mathbb P(H) = \\left(\\mathbb P(H \\mid e) \\cdot \\mathbb P(e)\\right) + \\left(\\mathbb P(H\\mid \\neg e) \\cdot \\mathbb P(\\neg e)\\right).$$ Therefore likelihood functions can *never* be p-hacked by *any possible* clever setup without you outright lying, because you can't have any possible procedure that a Bayesian knows in advance will make them update in a predictable net direction. For every update that we expect to be produced by a piece of evidence $e,$ there's an equal and opposite update that we expect to probably occur from seeing $\\neg e.$\n\n**Undergrad:** What?\n\n**Scientist:** Seconded.\n\n**Bayesian:** Look... let me try to zoom out a bit, and yes, look at the ongoing replication crisis. The Scientist proclaimed suspicion of grand new sweeping ideals. Okay, but the shift to likelihood functions is the kind of thing that *ought* to be able to solve a lot of problems at once. Let's say... I'm trying to think of a good analogy here. Let's say there's a corporation which is having a big crisis because their accountants are using floating-point numbers, only there's three different parts of the firm using three different representations of floating-point numbers to do numerically unstable calculations. Somebody starts with 1.0 and adds 0.0001 a thousand times and then subtracts 0.1 and gets 0.999999999999989. Or you can go to the other side of the building and use a different floating-point representation and get a different result. And nobody has any conception that there's anything wrong with this. 
Suppose there are BIG errors in the floating-point numbers, they're using the floating-point-number equivalent of crude ideograms and Roman numerals, you can get big pragmatic differences depending on what representation you use. And naturally, people 'division-hack' to get whatever financial results they want. So all the spreadsheets are failing to replicate, and people are starting to worry the 'cognitive priming' subdivision has actually been bankrupt for 20 years. And then one day you come in and you say, \"Hey. Everyone. Suppose that instead of these competing floating-point representations, we use my new representation instead. It can't be fooled the same way, which will solve a surprising number of your problems.\"\n\n*(The **Bayesian** now imitates the **Scientist's** voice:)* \"I'm suspicious,\" says the Senior Auditor. \"I suspect you of idealism. In my experience, people need to use different floating-point representations for different financial problems, and it's good to have a lot of different numerical representations of fractions in your toolbox.\"\n\n**Bayesian:** \"Well,\" I reply, \"it may sound idealistic, but in point of fact, this thing I'm about to show you is *the* representation of fractions, in which you *cannot* get different results depending on which way you add things or what order you do the operations in. It might be slightly more computationally expensive, but it is now no longer 1920 like when you first adopted the old system, and seriously, you can afford the computing power in a very large fraction of cases where you're only working with 30,000,000 bank accounts or some trivial number like that. Yes, if you want to do something like take square roots, it gets a bit more complicated, but very few of you are actually taking the square root of bank account balances. 
For the vast majority of things you are trying to do on a day-to-day basis, this system is unhackable without actually misreporting the numbers.\" And then I show them how to represent arbitrary-magnitude finite integers precisely, and how to represent a rational number as the ratio of two integers. What we would, nowadays, consider to be a direct, precise, computational representation of *the* system of rational numbers. The one unique axiomatized mathematical system of rational numbers, to which floating-point numbers are a mere approximation. And if you're just working with 30,000,000 bank account balances and your crude approximate floating-point numbers are *in practice* blowing up and failing to replicate and being exploited by people to get whatever results they want, and it is no longer 1920 and you can afford real computers now, it is an obvious step to have all the accountants switch to using *the* rational numbers. Just as Bayesian updates are *the* rational updates, in the unique mathematical axiomatized system of probabilities. And that's why you can't p-hack them.\n\n**Scientist:** That is a rather... audacious claim. And I confess, even if everything you said about the math were true, I would still be skeptical of the pragmatics. The current system of scientific statistics is something that's grown up over time and matured. Has this bright Bayesian way actually been tried?\n\n**Bayesian:** It hasn't been tried very much in science. In machine learning, where, uh, not to put too fine a point on it, we can actually see where the models are breaking because our AI doesn't work, it's been ten years since I've read a paper that tries to go at things from a frequentist angle and I can't *ever* recall seeing an AI algorithm calculate the p-value of anything. If you're doing anything principled at all from a probability-theoretic stance, it's probably Bayesian, and pretty much never frequentist. 
If you're classifying data using n-hot encodings, your loss function is the cross-entropy, not... I'm not even sure *what* the equivalent of trying to use 1920s-style p-values in AI would be like. I would frankly attribute this to people in machine learning having to use statistical tools that visibly succeed or fail; rather than needing to get published by going through a particular traditional ritual of p-value reporting, and failure to replicate not being all that bad for your career.\n\n**Scientist:** So you're actually more of a computer science guy than an experimentalist yourself. Why does this not surprise me? It's not impossible that some better statistical system than p-values could exist, but I'd advise you to respect the wisdom of experience. The fact that we know what p-hacking is, and are currently fighting it, is because we've had time to see where the edges of the system have problems, and we're figuring out how to fight those problems. This shiny new system will also have problems; you just have no idea what they'll be. Perhaps they'll be worse.\n\n**Bayesian:** It's not impossible that the accountants would figure out new shenanigans to pull with rational numbers, especially if they were doing some things computationally intensive enough that they could no longer afford to use *the* rational numbers and had to use some approximation instead. But I stand by my statement that if your financial spreadsheets are *right now* blowing up in a giant replication crisis in ways that seem clearly linked to using p-values, and the p-values are, frankly, bloody ad-hoc inconsistent nonsense, an obvious first step is to *try* using the rational updates instead. Although, it's possible we don't disagree too much in practice. 
I'd also pragmatically favor trying to roll things out one step at a time, like, maybe just switch over the psychological sciences and see how that goes.\n\n**Scientist:** How would you persuade them to do that?\n\n**Bayesian:** I have no goddamn idea. Honestly, I'm not expecting anyone to actually fix anything. People will just go on using p-values until the end of the world, probably. It's just one more Nice Thing We Can't Have. But there's a *chance* the idea will catch on. I was pleasantly surprised when open access caught on as quickly as it did. I was pleasantly surprised when people, like, actually noticed the replication crisis and it became a big issue that people cared about. Maybe I'll be pleasantly surprised again and people will actually take up the crusade to bury the p-value at a crossroads at midnight and put a stake through its heart. If so, I'll have done my part by making an understanding of [1lz Bayes' rule] and likelihoods [1zq more accessible] to everyone.\n\n**Scientist:** Or it could turn out that people don't *like* likelihoods, and that part of the wisdom of experience is the lesson that p-values are a kind of thing that experimentalists actually find useful and easy to use.\n\n**Bayesian:** If the experience of learning traditional statistics traumatized them so heavily that the thought of needing to learn a new system sends them screaming into the night, then yes, change might need to be imposed from outside. I'm hoping though that the Undergrad will read a [1zq short, cheerful introduction to Bayesian probability], compare this with his ominous heavy traditional statistics textbook, and come back going \"Please let me use likelihoods please let me use likelihoods oh god please let me use likelihoods.\"\n\n**Undergrad:** I guess I'll look into it and see?\n\n**Bayesian:** Weigh your decision carefully, Undergrad. Some changes in science depend upon students growing up familiar with multiple ideas and choosing the right one. 
Max Planck said so in a famous aphorism, so it must be true. Ergo, the entire ability of science to distinguish good and bad ideas within that class must rest upon the cognitive capacities of undergrads.\n\n**Scientist:** Oh, now that is just--\n\n**Moderator:** And we're out of time. Thanks for joining us, everyone!",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 4,
"maintainerCount": 1,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 44,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [
"2",
"32",
"5",
"267",
"8t7",
"8zq",
"9bv"
],
"childIds": [],
"parentIds": [
"2",
"4zd"
],
"commentIds": [
"6d6",
"6d8",
"909",
"9hy",
"9j0",
"9j1",
"9j2"
],
"questionIds": [],
"tagIds": [
"1r8",
"4v",
"4vr",
"60p"
],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [
{
"id": "4622",
"parentId": "1x5",
"childId": "4xx",
"type": "requirement",
"creatorId": "2",
"createdAt": "2016-06-30 03:00:39",
"level": 1,
"isStrong": false,
"everPublished": true
},
{
"id": "4623",
"parentId": "4vr",
"childId": "4xx",
"type": "requirement",
"creatorId": "2",
"createdAt": "2016-06-30 03:00:47",
"level": 1,
"isStrong": false,
"everPublished": true
}
],
"subjects": [],
"lenses": [],
"lensParentId": "4zd",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "23129",
"pageId": "4xx",
"userId": "9bv",
"edit": 44,
"type": "newEditProposal",
"createdAt": "2018-11-21 16:50:14",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "23040",
"pageId": "4xx",
"userId": "2",
"edit": 43,
"type": "newEdit",
"createdAt": "2018-07-02 01:10:21",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "23039",
"pageId": "4xx",
"userId": "2",
"edit": 42,
"type": "newEdit",
"createdAt": "2018-07-02 01:07:07",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "23038",
"pageId": "4xx",
"userId": "2",
"edit": 41,
"type": "newEdit",
"createdAt": "2018-07-02 01:04:26",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "23037",
"pageId": "4xx",
"userId": "2",
"edit": 40,
"type": "newEdit",
"createdAt": "2018-07-02 01:02:46",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "23036",
"pageId": "4xx",
"userId": "2",
"edit": 39,
"type": "newEdit",
"createdAt": "2018-07-02 01:02:16",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "23035",
"pageId": "4xx",
"userId": "2",
"edit": 38,
"type": "newEdit",
"createdAt": "2018-07-02 01:01:17",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "22979",
"pageId": "4xx",
"userId": "8zq",
"edit": 37,
"type": "newEdit",
"createdAt": "2018-02-12 00:24:13",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "22868",
"pageId": "4xx",
"userId": "8t7",
"edit": 35,
"type": "newEditProposal",
"createdAt": "2017-11-13 03:17:49",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "22745",
"pageId": "4xx",
"userId": "2",
"edit": 34,
"type": "newEdit",
"createdAt": "2017-09-02 23:00:54",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "22744",
"pageId": "4xx",
"userId": "2",
"edit": 33,
"type": "newEdit",
"createdAt": "2017-09-02 22:59:45",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "22743",
"pageId": "4xx",
"userId": "2",
"edit": 32,
"type": "newEdit",
"createdAt": "2017-09-02 22:58:47",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "22742",
"pageId": "4xx",
"userId": "2",
"edit": 31,
"type": "newEdit",
"createdAt": "2017-09-02 22:56:58",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "22741",
"pageId": "4xx",
"userId": "2",
"edit": 30,
"type": "newEdit",
"createdAt": "2017-09-02 22:56:42",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "22740",
"pageId": "4xx",
"userId": "2",
"edit": 29,
"type": "newEdit",
"createdAt": "2017-09-02 22:51:10",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "3718",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 1,
"dislikeCount": 0,
"likeScore": 1,
"individualLikes": [],
"id": "20362",
"pageId": "4xx",
"userId": "267",
"edit": 28,
"type": "newEdit",
"createdAt": "2016-11-21 19:27:09",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "20059",
"pageId": "4xx",
"userId": "32",
"edit": 27,
"type": "newEdit",
"createdAt": "2016-10-11 17:38:51",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "20058",
"pageId": "4xx",
"userId": "32",
"edit": 26,
"type": "newEdit",
"createdAt": "2016-10-11 17:14:18",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "20045",
"pageId": "4xx",
"userId": "2",
"edit": 24,
"type": "newEdit",
"createdAt": "2016-10-11 07:35:52",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "20044",
"pageId": "4xx",
"userId": "2",
"edit": 23,
"type": "newEdit",
"createdAt": "2016-10-11 07:33:41",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "20043",
"pageId": "4xx",
"userId": "2",
"edit": 22,
"type": "newEdit",
"createdAt": "2016-10-11 07:33:05",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "19979",
"pageId": "4xx",
"userId": "2",
"edit": 0,
"type": "newTag",
"createdAt": "2016-10-10 07:24:11",
"auxPageId": "4v",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "19337",
"pageId": "4xx",
"userId": "1yq",
"edit": 0,
"type": "newTag",
"createdAt": "2016-08-27 22:36:51",
"auxPageId": "60p",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "16369",
"pageId": "4xx",
"userId": "32",
"edit": 21,
"type": "newEdit",
"createdAt": "2016-07-10 13:04:37",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "16368",
"pageId": "4xx",
"userId": "32",
"edit": 20,
"type": "newEdit",
"createdAt": "2016-07-10 13:01:33",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "16349",
"pageId": "4xx",
"userId": "5",
"edit": 19,
"type": "newEdit",
"createdAt": "2016-07-10 06:55:54",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "16347",
"pageId": "4xx",
"userId": "5",
"edit": 18,
"type": "newEdit",
"createdAt": "2016-07-10 06:12:35",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": "I think this is easier to read with \"Bayesian:\" and \"Frequentist:\" bolded. Feel free to revert if you don't agree"
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15008",
"pageId": "4xx",
"userId": "32",
"edit": 0,
"type": "newParent",
"createdAt": "2016-07-01 01:52:56",
"auxPageId": "4zd",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14994",
"pageId": "4xx",
"userId": "5",
"edit": 17,
"type": "newEdit",
"createdAt": "2016-06-30 20:10:28",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14987",
"pageId": "4xx",
"userId": "32",
"edit": 15,
"type": "newEdit",
"createdAt": "2016-06-30 18:23:06",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14986",
"pageId": "4xx",
"userId": "32",
"edit": 14,
"type": "newEdit",
"createdAt": "2016-06-30 18:17:12",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14976",
"pageId": "4xx",
"userId": "32",
"edit": 13,
"type": "newEdit",
"createdAt": "2016-06-30 16:20:14",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14957",
"pageId": "4xx",
"userId": "32",
"edit": 12,
"type": "newEdit",
"createdAt": "2016-06-30 08:00:26",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14956",
"pageId": "4xx",
"userId": "32",
"edit": 11,
"type": "newEdit",
"createdAt": "2016-06-30 07:59:59",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14955",
"pageId": "4xx",
"userId": "32",
"edit": 10,
"type": "newEdit",
"createdAt": "2016-06-30 07:59:11",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14937",
"pageId": "4xx",
"userId": "2",
"edit": 9,
"type": "newEdit",
"createdAt": "2016-06-30 05:06:20",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14936",
"pageId": "4xx",
"userId": "2",
"edit": 8,
"type": "newEdit",
"createdAt": "2016-06-30 05:05:55",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14935",
"pageId": "4xx",
"userId": "2",
"edit": 7,
"type": "newEdit",
"createdAt": "2016-06-30 05:05:34",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14934",
"pageId": "4xx",
"userId": "2",
"edit": 6,
"type": "newEdit",
"createdAt": "2016-06-30 05:04:43",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14933",
"pageId": "4xx",
"userId": "2",
"edit": 5,
"type": "newEdit",
"createdAt": "2016-06-30 05:04:01",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14932",
"pageId": "4xx",
"userId": "2",
"edit": 4,
"type": "newEdit",
"createdAt": "2016-06-30 04:55:14",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14927",
"pageId": "4xx",
"userId": "2",
"edit": 3,
"type": "newEdit",
"createdAt": "2016-06-30 04:13:38",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14926",
"pageId": "4xx",
"userId": "2",
"edit": 2,
"type": "newEdit",
"createdAt": "2016-06-30 04:12:14",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14891",
"pageId": "4xx",
"userId": "2",
"edit": 0,
"type": "newTag",
"createdAt": "2016-06-30 03:01:41",
"auxPageId": "4vr",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14892",
"pageId": "4xx",
"userId": "2",
"edit": 0,
"type": "newTag",
"createdAt": "2016-06-30 03:01:41",
"auxPageId": "1r8",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14893",
"pageId": "4xx",
"userId": "2",
"edit": 0,
"type": "newRequirement",
"createdAt": "2016-06-30 03:01:41",
"auxPageId": "1x5",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14894",
"pageId": "4xx",
"userId": "2",
"edit": 0,
"type": "newRequirement",
"createdAt": "2016-06-30 03:01:41",
"auxPageId": "4vr",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14890",
"pageId": "4xx",
"userId": "2",
"edit": 0,
"type": "newParent",
"createdAt": "2016-06-30 03:01:40",
"auxPageId": "2",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "14888",
"pageId": "4xx",
"userId": "2",
"edit": 1,
"type": "newEdit",
"createdAt": "2016-06-30 03:01:39",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
}
],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {
"improveStub": {
"likeableId": "3746",
"likeableType": "contentRequest",
"myLikeValue": 0,
"likeCount": 1,
"dislikeCount": 0,
"likeScore": 1,
"individualLikes": [],
"id": "166",
"pageId": "4xx",
"requestType": "improveStub",
"createdAt": "2016-12-04 13:32:43"
},
"speedUp": {
"likeableId": "3496",
"likeableType": "contentRequest",
"myLikeValue": 0,
"likeCount": 1,
"dislikeCount": 0,
"likeScore": 1,
"individualLikes": [],
"id": "78",
"pageId": "4xx",
"requestType": "speedUp",
"createdAt": "2016-09-08 06:21:51"
}
}
},
"4y9": {
"likeableId": "2889",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "4y9",
"edit": 6,
"editSummary": "",
"prevEdit": 5,
"currentEdit": 6,
"wasPublished": true,
"type": "wiki",
"title": "Interpretations of \"probability\"",
"clickbait": "What does it *mean* to say that a fair coin has a 50% probability of coming up heads?",
"textLength": 12182,
"alias": "probability_interpretations",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "32",
"editCreatedAt": "2016-07-01 07:22:14",
"pageCreatorId": "32",
"pageCreatedAt": "2016-06-30 07:36:22",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 898,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"4yg": {
"likeableId": "2907",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "4yg",
"edit": 9,
"editSummary": "",
"prevEdit": 8,
"currentEdit": 9,
"wasPublished": true,
"type": "wiki",
"title": "Arbital quality",
"clickbait": "Arbital's system for tracking page quality.",
"textLength": 721,
"alias": "arbital_quality",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-07-16 01:23:21",
"pageCreatorId": "1yq",
"pageCreatedAt": "2016-06-30 02:21:25",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 99,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"4ym": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "4ym",
"edit": 2,
"editSummary": "",
"prevEdit": 1,
"currentEdit": 2,
"wasPublished": true,
"type": "wiki",
"title": "Unassessed",
"clickbait": "This page's quality has not been assessed.",
"textLength": 134,
"alias": "unassessed_meta_tag",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-06-30 04:35:45",
"pageCreatorId": "1yq",
"pageCreatedAt": "2016-06-30 02:32:42",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 46,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"4zd": {
"likeableId": "2908",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 10,
"dislikeCount": 0,
"likeScore": 10,
"individualLikes": [
"1ch",
"2cl",
"2h6",
"2vh",
"32",
"345",
"34s",
"4tg",
"6jy",
"9bk"
],
"pageId": "4zd",
"edit": 23,
"editSummary": "",
"prevEdit": 22,
"currentEdit": 23,
"wasPublished": true,
"type": "wiki",
"title": "Report likelihoods, not p-values",
"clickbait": "",
"textLength": 16927,
"alias": "likelihoods_not_pvalues",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "32",
"editCreatedAt": "2017-04-29 05:00:59",
"pageCreatorId": "32",
"pageCreatedAt": "2016-07-01 01:52:54",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 3883,
"text": "[summary: If scientists reported likelihood functions instead of p-values, this could help science avoid [https://en.wikipedia.org/wiki/Data_dredging p-hacking], [https://en.wikipedia.org/wiki/Publication_bias publication bias], [https://en.wikipedia.org/wiki/Decline_effect the decline effect], and other hazards of standard statistical techniques. Furthermore, it could help make it easier to combine results from multiple studies and perform meta-analyses, while making statistics intuitively easier to understand. (This is a bold claim, but a claim which is largely supported by [-1bv].)]\n\nThis page advocates for a change in the way that statistics is done in standard scientific journals. The key idea is to report [56s likelihood functions] instead of p-values, and this could have many benefits.\n\n_(Note: This page is a personal [60p opinion page].)_\n\n[toc:]\n\n## What's the difference?\n\nThe status quo across scientific journals is to test data for \"[statistically_significant statistical significance]\" using functions such as [p_value p-values]. A p-value is a number calculated from a hypothesis (called the \"null hypothesis\"), an experiment, a result, and a [-summary_statistic]. For example, if the null hypothesis is \"this coin is fair,\" and the experiment is \"flip it 6 times\", and the result is HHHHHT, and the summary statistic is \"the sequence has at least five H values,\" then the p-value is 0.11, which means \"if the coin were fair, and we did this experiment a lot, then only 11% of the sequences generated would have at least five H values.\"%%note:This does _not_ mean that the coin is 89% likely to be biased! For example, if the only alternative is that the coin is biased towards tails, then HHHHHT is evidence that it's fair. 
This is a common source of confusion with p-values.%% If the p-value is lower than an arbitrary threshold (usually $p < 0.05$) then the result is called \"statistically significant\" and the null hypothesis is \"rejected.\"\n\nThis page advocates that scientific articles should report _likelihood functions_ instead of p-values. A likelihood function for a piece of evidence $e$ is a function $\\mathcal L$ which says, for each hypothesis $H$ in some set of hypotheses, the probability that $H$ assigned to $e$, written [51n $\\mathcal L_e(H)$].%%note: Many authors write $\\mathcal L(H \\mid e)$ instead. We think this is confusing, as then $\\mathcal L(H \\mid e) = \\mathbb P(e \\mid H),$ and it's hard enough for students of statistics to keep \"probability of $H$ given $e$\" and \"probability of $e$ given $H$\" straight as it is if the notation _isn't_ swapped around every so often.%% For example, if $e$ is \"this coin, flipped 6 times, generated HHHHHT\", and the set of hypotheses are $H_{0.25} =$ \"the coin only produces heads 25% of the time\" and $H_{0.5}$ = \"the coin is fair\", then $\\mathcal L_e(H_{0.25})$ $=$ $0.25^5 \\cdot 0.75$ $\\approx 0.07\\%$ and $\\mathcal L_e(H_{0.5})$ $=$ $0.5^6$ $\\approx 1.56\\%,$ for a [1rq likelihood ratio] of about $21 : 1$ in favor of the coin being fair (as opposed to biased 75% towards tails).\n\nIn fact, with a single likelihood function, we can report the amount of support $e$ gives to _every_ hypothesis $H_b$ of the form \"the coin has bias $b$ towards heads\":%note:To learn how this graph was generated, see [1zj Bayes' rule: Functional form].%\n\n![](http://i.imgur.com/UwwxmCe.png)\n\nNote that this likelihood function is _not_ telling us the probability that the coin is actually biased, it is _only_ telling us how much the evidence supports each hypothesis. 
For example, this graph says that HHHHHT provides about 3.8 times as much evidence for $H_{0.75}$ over $H_{0.5}$, and about 81 times as much evidence for $H_{0.75}$ over $H_{0.25}.$\n\nNote also that the likelihood function doesn't necessarily contain the _right_ hypothesis; for example, the function above shows the support of $e$ for every possible bias on the coin, but it doesn't consider hypotheses like \"the coin alternates between H and T\". Likelihood functions, like p-values, are essentially a mere summary of the raw data — there is no substitute for the raw data when it comes to allowing people to test hypotheses that the original researchers did not consider. (In other words, even if you report likelihoods instead of p-values, it's still virtuous to share your raw data.)\n\nWhere p-values let you measure (roughly) how well the data supports a _single_ \"null hypothesis\", with an arbitrary 0.05 \"not well enough\" cutoff, the likelihood function shows the support of the evidence for lots and lots of different hypotheses at once, without any need for an arbitrary cutoff.\n\n## Why report likelihoods instead of p-values?\n\n__1. Likelihood functions are less arbitrary than p-values.__ To report a likelihood function, all you have to do is pick which hypothesis class to generate the likelihood function for. That's your only degree of freedom. This introduces one source of arbitrariness, and if someone wants to check some other hypothesis they still need access to the raw data, but it is better than the p-value case, where you only report a number for a single \"null\" hypothesis.\n\nFurthermore, in the p-value case, you have to pick not only a null hypothesis but also an experiment and a summary statistic, and these degrees of freedom can have a huge impact on the final report. 
These extra degrees of freedom are both unnecessary ([1lz to carry out a probabilistic update, all you need are your own personal beliefs and a likelihood function]) and exploitable, and empirically, they're actively harming scientific research.\n\n__2. Reporting likelihoods would solve p-hacking.__ If you're using p-values, then you can game the statistics via your choice of experiment and summary statistics. In the example with the coin above, if you say your experiment and summary statistic are \"flip the coin 6 times and count the number of heads\" then the p-value of HHHHHT with respect to $H_{0.5}$ is 0.11, whereas if you say your experiment and summary statistic are \"flip the coin until it comes up tails and count the number of heads\" then the p-value of HHHHHT with respect to $H_{0.5}$ is 0.03, which is \"significant.\" This is called \"[https://en.wikipedia.org/wiki/Data_dredging p-hacking]\", and it's a serious problem in modern science.\n\nIn a likelihood function, _the amount of support an evidence gives to a hypothesis does not depend on which experiment the researcher had in mind._ Likelihood functions depend only on the data you actually saw, and the hypotheses you chose to report. The only way to cheat a likelihood function is to lie about the data you collected, or refuse to report likelihoods for a particular hypothesis.\n\nIf your paper fails to report likelihoods for some obvious hypotheses, then (a) that's precisely analogous to you choosing the wrong null hypothesis to consider; (b) it's just as easily noticeable as when your paper considers the wrong null hypothesis; and (c) it can be easily rectified given access to the raw data. By contrast, p-hacking can be subtle and hard to detect after the fact.\n\n__3. Likelihood functions are very difficult to game.__ There is no analog of p-hacking for likelihood functions. 
This is a theorem of probability theory known as [-conservation_of_expected_evidence], which says that likelihood functions can't be gamed unless you're falsifying or omitting data (or screwing up the likelihood calculations).%note:Disclaimer: the theorem says likelihood functions can't be gamed, but we still shouldn't underestimate the guile of dishonest researchers struggling to make their results look important. Likelihood functions have not been put through the gauntlet of real scientific practice; p-values have. That said, when p-values were put through that gauntlet, they [https://en.wikipedia.org/wiki/Data_dredging failed] in a [https://en.wikipedia.org/wiki/Replication_crisis spectacular fashion]. When rebuilding, it's probably better to start from foundations that provably cannot be gamed.%\n\n\n__4. Likelihood functions would help stop the \"vanishing effect sizes\" phenomenon.__ The [https://en.wikipedia.org/wiki/Decline_effect decline effect] occurs when studies which reject a null hypothesis $H_0$ have effect sizes that get smaller and smaller and smaller over time (the more someone tries to replicate the result). This is [http://slatestarcodex.com/2014/04/28/the-control-group-is-out-of-control/ usually evidence] that there is no actual effect, and that the initial \"large effects\" were a result of publication bias.\n\nLikelihood functions help avoid the decline effect by _treating different effect sizes differently._ The likelihood function for coins of different biases shows that the evidence HHHHHT gives a different amount of support to $H_{0.52},$ $H_{0.61}$, and $H_{0.8}$ (which correspond to small, medium, and large effect sizes, respectively). 
If three different studies find low support for $H_{0.5},$ and one of them gives all of its support to the large effect, another gives all its support to the medium effect, and the third gives all of its support to the smallest effect, then likelihood functions reveal that something fishy is going on (because they're all peaked in different places).\n\nIf instead we only use p-values, and always decide whether or not to \"keep\" or \"reject\" the null hypothesis (without specifying how much support goes to different alternatives), then it's hard to notice that the studies are actually contradictory (and that something very fishy is going on). Instead, it's very tempting to exclaim \"3 out of 3 studies reject $H_{0.5}$!\" and move on.\n\n__5. Likelihood functions would help stop publication bias.__ When using p-values, if the data yields a p-value of 0.11 using a null hypothesis $H_0$, the study is considered \"insignificant,\" and many journals have a strong bias towards positive results. When reporting likelihood functions, there is no arbitrary \"significance\" threshold. If a study reports a relative likelihood of $21 : 1$ in favor of $H_a$ vs $H_0,$ that's exactly the same _strength of evidence_ as a study that reports $21 : 1$ odds against $H_a$ vs $H_0.$ It's all just evidence, and it can all be added to the corpus, there's no arbitrary \"significance\" threshold.\n\n__6. Likelihood functions make it trivially easy to combine studies.__ When combining studies that used p-values, researchers have to perform complex meta-analyses with dozens of parameters to tune, and they often find [https://en.wikipedia.org/wiki/Confirmation_bias exactly what they were expecting to find]. By contrast, the way you combine multiple studies that reported likelihood functions is... (drumroll)\n...you just multiply the likelihood functions together. 
If study A reports that $H_{0.75}$ was favored over $H_{0.5}$ with a [-1rq] of $3.8 : 1$, and study B reports that $H_{0.75}$ was favored over $H_{0.5}$ at $5 : 1$, then the combined likelihood functions of both studies favors $H_{0.75}$ over $H_{0.5}$ at $(3.8 \\cdot 5) : 1$ $=$ $19 : 1.$\n\nWant to combine a hundred studies on the same subject? Multiply a hundred functions together. Done. No parameter tuning, no degrees of freedom through which bias can be introduced — just multiply.\n\n__7. Likelihood functions make it obvious when something has gone wrong.__ If, when you multiply all the likelihood functions together, _all_ hypotheses have extraordinarily low likelihoods, then something has gone wrong. Either a mistake has been made somewhere, or fraud has been committed, or the true hypothesis wasn't in the hypothesis class you're considering.\n\nThe actual hypothesis that explains all the data will have decently high likelihood across all the data. If none of the hypotheses fit that description, then either you aren't considering the right hypothesis yet, or some of the studies went wrong. (Try looking for one study that has a likelihood function _very very different_ from all the other studies, and investigate that one.)\n\nLikelihood functions won't do your science for you — you still have to generate good hypotheses, and be honest in your data reporting — but they _do_ make it obvious when something went wrong. (Specifically, [227 each hypothesis can tell you how low its likelihood is expected to be on the data], and if _every_ hypothesis has a likelihood far lower than expected, then something's fishy.)\n\n---\n\nA scientific community using likelihood functions would produce scientific research that's easier to use. 
If everyone's reporting likelihood functions, then all you personally need to do in order to figure out what to believe is take your own personal (subjective) prior probabilities and multiply them by all the likelihood functions in order to get your own personal (subjective) posterior probabilities.\n\nFor example, let's say you personally think the coin is probably fair, with $10 : 1$ odds of being fair as opposed to 75% biased in favor of heads. Now let's say that study A reports a likelihood function which favors $H_{0.75}$ over $H_{0.5}$ with a likelihood ratio of $3.8 : 1.$, and study B reports a $5 : 1$ likelihood ratio in the same direction. Multiplying all these together, your personal posterior beliefs should be $19 : 10$ in favor of $H_{0.75}$ over $H_{0.5}$. This is simply [1lz Bayes' rule]. Reporting likelihoods instead of p-values lets science remain objective, while allowing everyone to find their own personal posterior probabilities via a simple application of Bayes' theorem.\n\n## Why should we think this would work?\n\nThis may all sound too good to be true. Can one simple change really solve that many problems in modern science?\n\nFirst of all, you can be assured that reporting likelihoods instead of p-values would not \"solve\" all the problems above, and it would surely not solve all problems with modern experimental science. Open access to raw data, preregistration of studies, a culture that rewards replication, and many other ideas are also crucial ingredients to a scientific community that zeroes in on truth.\n\nHowever, reporting likelihoods would _help_ solve lots of different problems in modern experimental science. This may come as a surprise. Aren't likelihood functions just one more statistical technique, just another tool for the toolbox? Why should we think that one single tool can solve that many problems?\n\nThe reason lies in [-1bv]. 
According to the axioms of probability theory, there is only one good way to account for evidence when updating your beliefs, and that way is via likelihood functions. Any other method is subject to inconsistencies and pathologies, as per the [probability_coherence_theorems coherence theorems of probability theory].\n\nIf you're manipulating equations like $2 + 2 = 4,$ and you're using methods that may or may not let you throw in an extra 3 on the right hand side (depending on the arithmetician's state of mind), then it's no surprise that you'll occasionally get yourself into trouble and deduce that $2 + 2 = 7.$ The laws of arithmetic show that there is only one correct set of tools for manipulating equations if you want to avoid inconsistency.\n\nSimilarly, the laws of probability theory show that there is only one correct set of tools for manipulating _uncertainty_ if you want to avoid inconsistency. According to [1lz those rules], the right way to represent evidence is through likelihood functions.\n\nThese laws (and a solid understanding of them) are younger than the experimental science community, and the statistical tools of that community predate a modern understanding of probability theory. Thus, it makes a lot of sense that the existing literature uses different tools. However, now that humanity _does_ possess a solid understanding of probability theory, it should come as no surprise that many diverse pathologies in statistics can be cleaned up by switching to a policy of reporting likelihoods instead of p-values.\n\n## What are the drawbacks?\n\nThe main drawback is inertia. Experimental science today reports p-values almost entirely across the board. Modern statistical toolsets have built-in support for p-values (and other related statistical tools) but very little support for reporting likelihood functions. 
Experimental scientists are trained mainly in [-frequentist_statistics], and thus most are much more familiar with p-value-type tools than likelihood-function-type tools. Making the switch would be painful.\n\nBarring the switching costs, though, making the switch could well be a strict improvement over modern techniques, and would help solve some of the biggest [http://slatestarcodex.com/2014/04/28/the-control-group-is-out-of-control/ problems] [http://www.stat.columbia.edu/~gelman/research/published/asa_pvalues.pdf facing] [https://en.wikipedia.org/wiki/Data_dredging science] [https://en.wikipedia.org/wiki/Publication_bias today].\n\nSee also the [505 Likelihoods not p-values FAQ] and [4xx].",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 2,
"maintainerCount": 1,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [
"32",
"5",
"1yq"
],
"childIds": [
"4xx",
"505"
],
"parentIds": [
"1rf"
],
"commentIds": [
"4zs"
],
"questionIds": [],
"tagIds": [
"60p"
],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [
{
"id": "63",
"pageId": "4zd",
"lensId": "505",
"lensIndex": 0,
"lensName": "FAQ",
"lensSubtitle": "",
"createdBy": "32",
"createdAt": "2016-07-04 05:31:10",
"updatedBy": "32",
"updatedAt": "2016-07-04 05:31:16"
},
{
"id": "56",
"pageId": "4zd",
"lensId": "4xx",
"lensIndex": 1,
"lensName": "Dialog",
"lensSubtitle": "",
"createdBy": "32",
"createdAt": "2016-07-01 01:52:48",
"updatedBy": "32",
"updatedAt": "2016-07-04 05:31:16"
}
],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "22511",
"pageId": "4zd",
"userId": "32",
"edit": 23,
"type": "newEdit",
"createdAt": "2017-04-29 05:00:59",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "22510",
"pageId": "4zd",
"userId": "32",
"edit": 22,
"type": "newEdit",
"createdAt": "2017-04-29 04:59:52",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "20040",
"pageId": "4zd",
"userId": "32",
"edit": 21,
"type": "newEdit",
"createdAt": "2016-10-11 05:46:26",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "20039",
"pageId": "4zd",
"userId": "32",
"edit": 20,
"type": "newEdit",
"createdAt": "2016-10-11 05:36:03",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "20038",
"pageId": "4zd",
"userId": "32",
"edit": 19,
"type": "newEdit",
"createdAt": "2016-10-11 05:33:34",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "20037",
"pageId": "4zd",
"userId": "32",
"edit": 18,
"type": "newEdit",
"createdAt": "2016-10-11 05:32:21",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "19336",
"pageId": "4zd",
"userId": "1yq",
"edit": 17,
"type": "newEdit",
"createdAt": "2016-08-27 22:36:22",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": "added TOC and link to opinion page tag"
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "19335",
"pageId": "4zd",
"userId": "1yq",
"edit": 0,
"type": "newTag",
"createdAt": "2016-08-27 22:35:56",
"auxPageId": "60p",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "16365",
"pageId": "4zd",
"userId": "32",
"edit": 16,
"type": "newEdit",
"createdAt": "2016-07-10 12:56:32",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "16344",
"pageId": "4zd",
"userId": "5",
"edit": 15,
"type": "newEdit",
"createdAt": "2016-07-10 05:53:26",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15920",
"pageId": "4zd",
"userId": "32",
"edit": 14,
"type": "newEdit",
"createdAt": "2016-07-07 04:56:02",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15909",
"pageId": "4zd",
"userId": "32",
"edit": 13,
"type": "newEdit",
"createdAt": "2016-07-07 03:56:21",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15788",
"pageId": "4zd",
"userId": "32",
"edit": 12,
"type": "newEdit",
"createdAt": "2016-07-06 21:03:22",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15787",
"pageId": "4zd",
"userId": "32",
"edit": 11,
"type": "newEdit",
"createdAt": "2016-07-06 21:00:04",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15785",
"pageId": "4zd",
"userId": "32",
"edit": 10,
"type": "newEdit",
"createdAt": "2016-07-06 20:59:01",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15784",
"pageId": "4zd",
"userId": "32",
"edit": 9,
"type": "newEdit",
"createdAt": "2016-07-06 20:57:45",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15253",
"pageId": "4zd",
"userId": "32",
"edit": 8,
"type": "newEdit",
"createdAt": "2016-07-04 16:07:31",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15209",
"pageId": "4zd",
"userId": "32",
"edit": 0,
"type": "lensOrderChanged",
"createdAt": "2016-07-04 05:31:16",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15207",
"pageId": "4zd",
"userId": "32",
"edit": 0,
"type": "newChild",
"createdAt": "2016-07-04 05:31:06",
"auxPageId": "505",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15205",
"pageId": "4zd",
"userId": "32",
"edit": 7,
"type": "newEdit",
"createdAt": "2016-07-04 05:25:17",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15204",
"pageId": "4zd",
"userId": "32",
"edit": 6,
"type": "newEdit",
"createdAt": "2016-07-04 05:14:06",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15198",
"pageId": "4zd",
"userId": "5",
"edit": 5,
"type": "newEdit",
"createdAt": "2016-07-03 17:49:32",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": "builtin -> built-in"
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15163",
"pageId": "4zd",
"userId": "5",
"edit": 4,
"type": "newEdit",
"createdAt": "2016-07-03 09:22:17",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15071",
"pageId": "4zd",
"userId": "32",
"edit": 3,
"type": "newEdit",
"createdAt": "2016-07-01 16:59:38",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15042",
"pageId": "4zd",
"userId": "32",
"edit": 2,
"type": "newEdit",
"createdAt": "2016-07-01 07:31:12",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15007",
"pageId": "4zd",
"userId": "32",
"edit": 0,
"type": "newChild",
"createdAt": "2016-07-01 01:52:56",
"auxPageId": "4xx",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15006",
"pageId": "4zd",
"userId": "32",
"edit": 0,
"type": "newParent",
"createdAt": "2016-07-01 01:52:55",
"auxPageId": "1rf",
"oldSettingsValue": "",
"newSettingsValue": ""
},
{
"likeableId": "0",
"likeableType": "changeLog",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"id": "15004",
"pageId": "4zd",
"userId": "32",
"edit": 1,
"type": "newEdit",
"createdAt": "2016-07-01 01:52:54",
"auxPageId": "",
"oldSettingsValue": "",
"newSettingsValue": ""
}
],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": true,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {
"improveStub": {
"likeableId": "3744",
"likeableType": "contentRequest",
"myLikeValue": 0,
"likeCount": 1,
"dislikeCount": 0,
"likeScore": 1,
"individualLikes": [],
"id": "164",
"pageId": "4zd",
"requestType": "improveStub",
"createdAt": "2016-12-04 02:09:17"
}
}
},
"4zs": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "4zs",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "comment",
"title": "\"Do the different biases of coin correspond to d...\"",
"clickbait": "",
"textLength": 160,
"alias": "4zs",
"externalUrl": "",
"sortChildrenBy": "recentFirst",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "5",
"editCreatedAt": "2016-07-01 16:55:26",
"pageCreatorId": "5",
"pageCreatedAt": "2016-07-01 16:55:26",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": true,
"isApprovedComment": true,
"isResolved": true,
"snapshotText": "",
"anchorContext": "Likelihood functions help avoid the decline effect by treating different effect sizes differently\\. The likelihood function for coins of different biases shows that the evidence HHHHHT gives a different amount of support to $H_{0.55},$ $H_{0.6}$, and $H_{0.8}.$ If three different studies find low support for $H_{0.5},$ and one of them gives all of its support to the large effect, another gives all its support to the medium effect, and the third gives al of its support to the smallest effect, then likelihood functions reveal that something fishy is going on \\(because they're all peaked in different places\\)\\.",
"anchorText": "large effect",
"anchorOffset": 369,
"mergedInto": "",
"isDeleted": false,
"viewCount": 3879,
"text": "Do the different biases of coin correspond to different effect sizes? (E.g. large effect corresponds to H0.8, medium to H0.6, small effect corresponds to H0.55)",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 2,
"maintainerCount": 2,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [
"4zd"
],
"commentIds": [
"4zt"
],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"4zt": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "4zt",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "comment",
"title": "\"Yes.\"",
"clickbait": "",
"textLength": 4,
"alias": "4zt",
"externalUrl": "",
"sortChildrenBy": "recentFirst",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "32",
"editCreatedAt": "2016-07-01 16:56:18",
"pageCreatorId": "32",
"pageCreatedAt": "2016-07-01 16:56:18",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": true,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 3880,
"text": "Yes.",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [
"4zd",
"4zs"
],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"51n": {
"likeableId": "3494",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "51n",
"edit": 2,
"editSummary": "",
"prevEdit": 1,
"currentEdit": 2,
"wasPublished": true,
"type": "wiki",
"title": "Likelihood notation",
"clickbait": "",
"textLength": 680,
"alias": "likelihood_notation",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "32",
"editCreatedAt": "2016-07-07 05:16:06",
"pageCreatorId": "32",
"pageCreatedAt": "2016-07-04 17:15:04",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 38,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"56s": {
"likeableId": "2993",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "56s",
"edit": 3,
"editSummary": "",
"prevEdit": 2,
"currentEdit": 3,
"wasPublished": true,
"type": "wiki",
"title": "Likelihood function",
"clickbait": "",
"textLength": 2100,
"alias": "likelihood_function",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1",
"editCreatedAt": "2016-08-02 17:12:50",
"pageCreatorId": "32",
"pageCreatedAt": "2016-07-07 06:09:23",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 126,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"56t": {
"likeableId": "2992",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "56t",
"edit": 2,
"editSummary": "",
"prevEdit": 1,
"currentEdit": 2,
"wasPublished": true,
"type": "wiki",
"title": "Likelihood ratio",
"clickbait": "",
"textLength": 1432,
"alias": "likelihood_ratio",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "32",
"editCreatedAt": "2016-07-07 14:39:05",
"pageCreatorId": "32",
"pageCreatedAt": "2016-07-07 05:51:09",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 233,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"58c": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "58c",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "wiki",
"title": "Decision Theory",
"clickbait": "",
"textLength": 161,
"alias": "DecisionTheory",
"externalUrl": "",
"sortChildrenBy": "alphabetical",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1",
"editCreatedAt": "2016-07-08 18:23:14",
"pageCreatorId": "1",
"pageCreatedAt": "2016-07-08 18:23:14",
"seeDomainId": "0",
"editDomainId": "15",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 143,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"58l": {
"likeableId": "3060",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "58l",
"edit": 8,
"editSummary": "",
"prevEdit": 6,
"currentEdit": 8,
"wasPublished": true,
"type": "wiki",
"title": "Arbital user groups",
"clickbait": "Users can attain different powers and responsibilities on Arbital.",
"textLength": 2344,
"alias": "arbital_user_groups",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-07-19 00:24:53",
"pageCreatorId": "1yq",
"pageCreatedAt": "2016-07-09 00:06:07",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 91,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"60p": {
"likeableId": "3479",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 1,
"dislikeCount": 0,
"likeScore": 1,
"individualLikes": [],
"pageId": "60p",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "wiki",
"title": "Opinion page",
"clickbait": "Opinion pages represent one position on a topic (often from a single author), and are not necessarily balanced or a reflection of consensus.",
"textLength": 118,
"alias": "opinion_meta_tag",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "1yq",
"editCreatedAt": "2016-08-27 22:32:15",
"pageCreatorId": "1yq",
"pageCreatedAt": "2016-08-27 22:32:15",
"seeDomainId": "0",
"editDomainId": "3",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 23,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"6d6": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "6d6",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "comment",
"title": "\"\"That's because we're considering results like ...\"",
"clickbait": "",
"textLength": 346,
"alias": "6d6",
"externalUrl": "",
"sortChildrenBy": "recentFirst",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "5wf",
"editCreatedAt": "2016-10-14 20:55:34",
"pageCreatorId": "5wf",
"pageCreatedAt": "2016-10-14 20:55:34",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": true,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "Undergrad: Okay, just to make sure I have this straight: That's because we're considering results like HHHTHH or TTTTTT to be equally or more extreme, and there are 14 total possible cases like that, and we flipped the coin 6 times which gives us $2^6 = 64$ possible results\\. 14/64 = 22%, which is not less than 5%, so this is not statistically significant at the $p<0.05$ level\\.",
"anchorText": "That's because we're considering results like HHHTHH or TTTTTT to be equally or more extreme, and there are 14 total possible cases like that",
"anchorOffset": 59,
"mergedInto": "",
"isDeleted": false,
"viewCount": 2940,
"text": "\"That's because we're considering results like HHHTHH or TTTTTT to be equally or more extreme, and there are 14 total possible cases like that.\"\n\nIt is not evident for me, because I am not familiar with statistics. I think where is a need to provide calculation, that there is two examples in form TTTTTT and HHHHHH and 6*2 like HTHHHH or HTTTTT.",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 1,
"maintainerCount": 1,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [
"4xx"
],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"6d8": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "6d8",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "comment",
"title": "\"\"We only ran the 2012 US Presidential Election ...\"",
"clickbait": "",
"textLength": 332,
"alias": "6d8",
"externalUrl": "",
"sortChildrenBy": "recentFirst",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "5wf",
"editCreatedAt": "2016-10-14 22:09:02",
"pageCreatorId": "5wf",
"pageCreatedAt": "2016-10-14 22:09:02",
"seeDomainId": "0",
"editDomainId": "1",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": true,
"isApprovedComment": true,
"isResolved": false,
"snapshotText": "",
"anchorContext": "Bayesian: Are we going there? I guess we're going there\\. My good Scientist, I mean that if you offered me either side of an even\\-money bet on whether Plum committed the murder, I'd bet that he didn't do it\\. But if you offered me a gamble that costs \\$1 if Professor Plum is innocent and pays out \\$5 if he's guilty, I'd cheerfully accept that gamble\\. We only ran the 2012 US Presidential Election one time, but that doesn't mean that on November 7th you should've refused a \\$10 bet that paid out \\$1000 if Obama won\\. In general when prediction markets and large liquid betting pools put 60% betting odds on somebody winning the presidency, that outcome tends to happen 60% of the time; they are well\\-calibrated for probabilities in that range\\. If they were systematically uncalibrated\\-\\-if in general things happened 80% of the time when prediction markets said 60%\\-\\-you could use that fact to pump mony out of prediction markets\\. And your pumping out that money would adjust the prediction\\-market prices until they were well\\-calibrated\\. If things to which prediction markets assign 70% probability happen around 7 times out of 10, why insist for reasons of ideological purity that the probability statement is meaningless?",
"anchorText": "We only ran the 2012 US Presidential Election one time, but that doesn't mean that on November 7th you should've refused a \\$10 bet that paid out \\$1000 if Obama won\\.",
"anchorOffset": 360,
"mergedInto": "",
"isDeleted": false,
"viewCount": 2940,
"text": "\"We only ran the 2012 US Presidential Election one time, but that doesn't mean that on November 7th you should've refused a $10 bet that paid out $1000 if Obama won.\"\n\nFirst of all, as non American I do not know what is specific about November 7th. Second, I think that some people may do not even know, that Obama has actually won.",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 1,
"maintainerCount": 1,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [
"4xx"
],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"7ry": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "7ry",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "wiki",
"title": "Coherence theorems",
"clickbait": "A 'coherence theorem' shows that something bad happens to an agent if its decisions can't be viewed as 'coherent' in some sense. E.g., an inconsistent preference ordering leads to going in circles.",
"textLength": 6853,
"alias": "coherence_theorems",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "2",
"editCreatedAt": "2017-02-07 21:07:09",
"pageCreatorId": "2",
"pageCreatedAt": "2017-02-07 21:07:09",
"seeDomainId": "0",
"editDomainId": "15",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": false,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 334,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"8zq": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "8zq",
"edit": 2,
"editSummary": "",
"prevEdit": 1,
"currentEdit": 2,
"wasPublished": true,
"type": "group",
"title": "Adnll",
"clickbait": "Automatically generated page for Adnll",
"textLength": 115,
"alias": "AdnllL",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "8zq",
"editCreatedAt": "2018-02-15 17:44:28",
"pageCreatorId": "8zq",
"pageCreatedAt": "2018-02-03 16:56:49",
"seeDomainId": "0",
"editDomainId": "3017",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": false,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 1,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"9h5": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "9h5",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "group",
"title": "Eyal Roth",
"clickbait": "Automatically generated page for Eyal Roth",
"textLength": 124,
"alias": "EyalRoth",
"externalUrl": "",
"sortChildrenBy": "likes",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "9h5",
"editCreatedAt": "2019-03-18 12:58:43",
"pageCreatorId": "9h5",
"pageCreatedAt": "2019-03-18 12:58:43",
"seeDomainId": "0",
"editDomainId": "3306",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": false,
"isResolved": false,
"snapshotText": "",
"anchorContext": "",
"anchorText": "",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 1,
"text": "",
"metaText": "",
"isTextLoaded": false,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 0,
"maintainerCount": 0,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": [],
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": 0,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": null,
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": false,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"9hy": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "9hy",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "comment",
"title": "\"Not quite. The private state of mind of the res...\"",
"clickbait": "",
"textLength": 627,
"alias": "9hy",
"externalUrl": "",
"sortChildrenBy": "recentFirst",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "9h5",
"editCreatedAt": "2019-03-27 11:11:36",
"pageCreatorId": "9h5",
"pageCreatedAt": "2019-03-27 11:11:37",
"seeDomainId": "0",
"editDomainId": "3306",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": false,
"isResolved": false,
"snapshotText": "",
"anchorContext": "Bayesian: And it doesn't bother you that the meaning of your experiment depends on your private state of mind?",
"anchorText": "Bayesian: And it doesn't bother you that the meaning of your experiment depends on your private state of mind?",
"anchorOffset": 0,
"mergedInto": "",
"isDeleted": false,
"viewCount": 391,
"text": "Not quite. The private state of mind of the researcher changes nothing. It's only the issue of which question is asked.\n\nIn this case, the two questions are (a) what is the probability of such an event occurring after tossing a fair coin 6 times and (b) what is the probability of such an event occurring if a fair coin is tossed until it lands tails.\n\nThe meaning of the questions does not change, nor do the answers to them. It is only a matter of what question is being asked -- which is obviously important when conducting a study, but is not so counter-intuitive (and much less confusing) when presented in such way (IMO).",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 1,
"maintainerCount": 1,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [
"4xx"
],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"9j0": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "9j0",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "comment",
"title": "\"This \"argument\" by the \"scientist\" doesn't IMO ...\"",
"clickbait": "",
"textLength": 664,
"alias": "9j0",
"externalUrl": "",
"sortChildrenBy": "recentFirst",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "9h5",
"editCreatedAt": "2019-03-27 15:07:47",
"pageCreatorId": "9h5",
"pageCreatedAt": "2019-03-27 15:07:47",
"seeDomainId": "0",
"editDomainId": "3306",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": false,
"isResolved": false,
"snapshotText": "",
"anchorContext": "Scientist: So you're actually more of a computer science guy than an experimentalist yourself\\. Why does this not surprise me? It's not impossible that some better statistical system than p\\-values could exist, but I'd advise you to respect the wisdom of experience\\. The fact that we know what p\\-hacking is, and are currently fighting it, is because we've had time to see where the edges of the system have problems, and we're figuring out how to fight those problems\\. This shiny new system will also have problems; you just have no idea what they'll be\\. Perhaps they'll be worse\\.",
"anchorText": "So you're actually more of a computer science guy than an experimentalist yourself\\. Why does this not surprise me? It's not impossible that some better statistical system than p\\-values could exist, but I'd advise you to respect the wisdom of experience\\. The fact that we know what p\\-hacking is, and are currently fighting it, is because we've had time to see where the edges of the system have problems, and we're figuring out how to fight those problems\\. This shiny new system will also have problems; you just have no idea what they'll be\\. Perhaps they'll be worse\\.",
"anchorOffset": 12,
"mergedInto": "",
"isDeleted": false,
"viewCount": 390,
"text": "This \"argument\" by the \"scientist\" doesn't IMO represent how a true experimentalist would approach the issue; they would not necessarily be so opposed to trying new ways of improving their methods, as long as it is done step by step without replacing the entire system over night (just like the \"bayesian\" explains in the next paragraph).\n\nThis is also a bit side-tracking as it opens up the topic of how much more \"experience\" computer scientists have given the simpler and much more reproducible systems they're dealing with -- especially in the modern commercial world -- in contrast with natural sciences (I'm a programmer myself, so I'm a bit biased on this).",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 1,
"maintainerCount": 1,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [
"4xx"
],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"9j1": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "9j1",
"edit": 2,
"editSummary": "",
"prevEdit": 1,
"currentEdit": 2,
"wasPublished": true,
"type": "comment",
"title": "\"I feel like this paragraph might be a little ne...\"",
"clickbait": "",
"textLength": 518,
"alias": "9j1",
"externalUrl": "",
"sortChildrenBy": "recentFirst",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "9h5",
"editCreatedAt": "2019-03-27 15:18:56",
"pageCreatorId": "9h5",
"pageCreatedAt": "2019-03-27 15:17:37",
"seeDomainId": "0",
"editDomainId": "3306",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": false,
"isResolved": false,
"snapshotText": "",
"anchorContext": "Bayesian: Are we going there? I guess we're going there\\. My good Scientist, I mean that if you offered me either side of an even\\-money bet on whether Plum committed the murder, I'd bet that he didn't do it\\. But if you offered me a gamble that costs \\$1 if Professor Plum is innocent and pays out \\$5 if he's guilty, I'd cheerfully accept that gamble\\. We only ran the 2012 US Presidential Election one time, but that doesn't mean that on November 7th you should've refused a \\$10 bet that paid out \\$1000 if Obama won\\. In general when prediction markets and large liquid betting pools put 60% betting odds on somebody winning the presidency, that outcome tends to happen 60% of the time; they are well\\-calibrated for probabilities in that range\\. If they were systematically uncalibrated\\-\\-if in general things happened 80% of the time when prediction markets said 60%\\-\\-you could use that fact to pump money out of prediction markets\\. And your pumping out that money would adjust the prediction\\-market prices until they were well\\-calibrated\\. If things to which prediction markets assign 70% probability happen around 7 times out of 10, why insist for reasons of ideological purity that the probability statement is meaningless?",
"anchorText": "Are we going there? I guess we're going there\\.",
"anchorOffset": 11,
"mergedInto": "",
"isDeleted": false,
"viewCount": 390,
"text": "I feel like this paragraph might be a little necessary for someone who haven't read the bayes rule intro, but on the other hand is a bit off-topic in this context and quite distracting, as it raises questions which are not part of this \"discussion\"; mainly, questions regarding how to approach \"one-off\" events.\n\nSay, what if I can't quantify the outcome of my decision so nicely like in the case of a bet? What if I need to decide whether to send Miss Scarlet to prison or not based on these likelihood probabilities?",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 1,
"maintainerCount": 1,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [
"4xx"
],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
},
"9j2": {
"likeableId": "0",
"likeableType": "page",
"myLikeValue": 0,
"likeCount": 0,
"dislikeCount": 0,
"likeScore": 0,
"individualLikes": [],
"pageId": "9j2",
"edit": 1,
"editSummary": "",
"prevEdit": 0,
"currentEdit": 1,
"wasPublished": true,
"type": "comment",
"title": "\"I'm going to take the role of the \"undergrad\" h...\"",
"clickbait": "",
"textLength": 612,
"alias": "9j2",
"externalUrl": "",
"sortChildrenBy": "recentFirst",
"hasVote": false,
"voteType": "",
"votesAnonymous": false,
"editCreatorId": "9h5",
"editCreatedAt": "2019-03-27 15:33:15",
"pageCreatorId": "9h5",
"pageCreatedAt": "2019-03-27 15:33:15",
"seeDomainId": "0",
"editDomainId": "3306",
"submitToDomainId": "0",
"isAutosave": false,
"isSnapshot": false,
"isLiveEdit": true,
"isMinorEdit": false,
"indirectTeacher": false,
"todoCount": 0,
"isEditorComment": false,
"isApprovedComment": false,
"isResolved": false,
"snapshotText": "",
"anchorContext": "Bayesian: I wrote pretty much the same Python program when I was first converting to Bayesianism and finding out about likelihood ratios and feeling skeptical about the system maybe being abusable in some way, and then a friend of mine found out about likelihood ratios and he wrote essentially the same program, also in Python\\. And lo, he found that false evidence of 20:1 for the coin being 55% biased was found at least once, somewhere along the way\\.\\.\\. 1\\.4% of the time\\. If you asked for more extreme likelihood ratios, the chances of finding them dropped off even faster\\.",
"anchorText": "And lo, he found that false evidence of 20:1 for the coin being 55% biased was found at least once, somewhere along the way\\.\\.\\. 1\\.4% of the time",
"anchorOffset": 332,
"mergedInto": "",
"isDeleted": false,
"viewCount": 390,
"text": "I'm going to take the role of the \"undergrad\" here and try to interpret this in the following way:\n\nGiven that a hypothesis is true -- but it is unknown to be true -- it is far more likely to come by a \"statistically significant\" result indicating it is wrong, than it is likely to come by a result indicating that another hypothesis is significantly more likely.\n\nIn simpler words - it is far easier to \"prove\" a true hypothesis is wrong by accident, than it is to \"prove\" that an alternative hypothesis is superior (a better estimator of reality) by accident.\n\nWould you consider this interpretation accurate? ",
"metaText": "",
"isTextLoaded": true,
"isSubscribedToDiscussion": false,
"isSubscribedToUser": false,
"isSubscribedAsMaintainer": false,
"discussionSubscriberCount": 1,
"maintainerCount": 1,
"userSubscriberCount": 0,
"lastVisit": "",
"hasDraft": false,
"votes": [],
"voteSummary": null,
"muVoteSummary": 0,
"voteScaling": 0,
"currentUserVote": -2,
"voteCount": 0,
"lockedVoteType": "",
"maxEditEver": 0,
"redLinkCount": 0,
"lockedBy": "",
"lockedUntil": "",
"nextPageId": "",
"prevPageId": "",
"usedAsMastery": false,
"proposalEditNum": 0,
"permissions": {
"edit": {
"has": false,
"reason": "You don't have domain permission to edit this page"
},
"proposeEdit": {
"has": true,
"reason": ""
},
"delete": {
"has": false,
"reason": "You don't have domain permission to delete this page"
},
"comment": {
"has": false,
"reason": "You can't comment in this domain because you are not a member"
},
"proposeComment": {
"has": true,
"reason": ""
}
},
"summaries": {},
"creatorIds": [],
"childIds": [],
"parentIds": [
"4xx"
],
"commentIds": [],
"questionIds": [],
"tagIds": [],
"relatedIds": [],
"markIds": [],
"explanations": [],
"learnMore": [],
"requirements": [],
"subjects": [],
"lenses": [],
"lensParentId": "",
"pathPages": [],
"learnMoreTaughtMap": {},
"learnMoreCoveredMap": {},
"learnMoreRequiredMap": {},
"editHistory": {},
"domainSubmissions": {},
"answers": [],
"answerCount": 0,
"commentCount": 0,
"newCommentCount": 0,
"linkedMarkCount": 0,
"changeLogs": [],
"feedSubmissions": [],
"searchStrings": {},
"hasChildren": false,
"hasParents": true,
"redAliases": {},
"improvementTagIds": [],
"nonMetaTagIds": [],
"todos": [],
"slowDownMap": null,
"speedUpMap": null,
"arcPageIds": null,
"contentRequests": {}
}
},
"edits": {},
"users": {
"1": {
"id": "1",
"firstName": "Alexei",
"lastName": "Andreev",
"lastWebsiteVisit": "2018-02-18 09:35:21",
"isSubscribed": false,
"domainMembershipMap": {}
},
"2": {
"id": "2",
"firstName": "Eliezer",
"lastName": "Yudkowsky",
"lastWebsiteVisit": "2018-12-31 03:31:07",
"isSubscribed": false,
"domainMembershipMap": {}
},
"5": {
"id": "5",
"firstName": "Eric",
"lastName": "Rogstad",
"lastWebsiteVisit": "2019-02-06 17:46:54",
"isSubscribed": false,
"domainMembershipMap": {}
},
"32": {
"id": "32",
"firstName": "Nate",
"lastName": "Soares",
"lastWebsiteVisit": "2017-05-10 13:41:18",
"isSubscribed": false,
"domainMembershipMap": {}
},
"267": {
"id": "267",
"firstName": "Patrick",
"lastName": "Stevens",
"lastWebsiteVisit": "2017-01-20 07:06:48",
"isSubscribed": false,
"domainMembershipMap": {}
},
"345": {
"id": "345",
"firstName": "Rob",
"lastName": "Bensinger",
"lastWebsiteVisit": "2017-09-08 22:56:47",
"isSubscribed": false,
"domainMembershipMap": {}
},
"414": {
"id": "414",
"firstName": "Pierre",
"lastName": "Thierry",
"lastWebsiteVisit": "2018-11-03 17:23:36",
"isSubscribed": false,
"domainMembershipMap": {}
},
"1ch": {
"id": "1ch",
"firstName": "Andrew",
"lastName": "McKnight",
"lastWebsiteVisit": "2017-05-11 06:26:38",
"isSubscribed": false,
"domainMembershipMap": {}
},
"1yq": {
"id": "1yq",
"firstName": "Eric",
"lastName": "Bruylant",
"lastWebsiteVisit": "2017-04-14 18:00:22",
"isSubscribed": false,
"domainMembershipMap": {}
},
"2cl": {
"id": "2cl",
"firstName": "Travis",
"lastName": "Rivera",
"lastWebsiteVisit": "2017-10-08 22:46:28",
"isSubscribed": false,
"domainMembershipMap": {}
},
"2h6": {
"id": "2h6",
"firstName": "Mathieu",
"lastName": "Roy",
"lastWebsiteVisit": "2017-11-06 12:04:13",
"isSubscribed": false,
"domainMembershipMap": {}
},
"2vh": {
"id": "2vh",
"firstName": "Jaime",
"lastName": "Sevilla Molina",
"lastWebsiteVisit": "2018-12-06 12:14:41",
"isSubscribed": false,
"domainMembershipMap": {}
},
"34s": {
"id": "34s",
"firstName": "Ryan",
"lastName": "Blough",
"lastWebsiteVisit": "2017-11-28 01:20:59",
"isSubscribed": false,
"domainMembershipMap": {}
},
"4c1": {
"id": "4c1",
"firstName": "Mark",
"lastName": "Chimes",
"lastWebsiteVisit": "2017-04-10 17:09:17",
"isSubscribed": false,
"domainMembershipMap": {}
},
"4tg": {
"id": "4tg",
"firstName": "Joe",
"lastName": "Zeng",
"lastWebsiteVisit": "2019-04-27 21:01:44",
"isSubscribed": false,
"domainMembershipMap": {}
},
"5lh": {
"id": "5lh",
"firstName": "Kulbhushan",
"lastName": "Chand",
"lastWebsiteVisit": "2016-07-26 06:41:04",
"isSubscribed": false,
"domainMembershipMap": {}
},
"5wf": {
"id": "5wf",
"firstName": "Gregor",
"lastName": "Gerasev",
"lastWebsiteVisit": "2017-04-12 13:04:59",
"isSubscribed": false,
"domainMembershipMap": {}
},
"6jy": {
"id": "6jy",
"firstName": "Katherine",
"lastName": "Savoie",
"lastWebsiteVisit": "2017-02-22 02:08:37",
"isSubscribed": false,
"domainMembershipMap": {}
},
"8c0": {
"id": "8c0",
"firstName": "Peter",
"lastName": "Tapley",
"lastWebsiteVisit": "2017-09-12 14:27:10",
"isSubscribed": false,
"domainMembershipMap": {}
},
"8d0": {
"id": "8d0",
"firstName": "gia",
"lastName": "dang",
"lastWebsiteVisit": "2017-05-23 13:08:41",
"isSubscribed": false,
"domainMembershipMap": {}
},
"8s6": {
"id": "8s6",
"firstName": "Steven",
"lastName": "Zuber",
"lastWebsiteVisit": "2018-02-07 15:45:40",
"isSubscribed": false,
"domainMembershipMap": {}
},
"8t7": {
"id": "8t7",
"firstName": "Drake",
"lastName": "Thomas",
"lastWebsiteVisit": "2017-11-29 04:44:41",
"isSubscribed": false,
"domainMembershipMap": {}
},
"8wr": {
"id": "8wr",
"firstName": "Alexander",
"lastName": "Perepechko",
"lastWebsiteVisit": "2018-01-12 21:40:39",
"isSubscribed": false,
"domainMembershipMap": {}
},
"8xq": {
"id": "8xq",
"firstName": "Nathan",
"lastName": "Fish",
"lastWebsiteVisit": "2018-03-23 21:11:53",
"isSubscribed": false,
"domainMembershipMap": {}
},
"8zq": {
"id": "8zq",
"firstName": "Adnll",
"lastName": "L",
"lastWebsiteVisit": "2018-05-25 16:19:23",
"isSubscribed": false,
"domainMembershipMap": {}
},
"9bk": {
"id": "9bk",
"firstName": "Richard",
"lastName": "Wundrack",
"lastWebsiteVisit": "2018-11-23 21:06:02",
"isSubscribed": false,
"domainMembershipMap": {}
},
"9bv": {
"id": "9bv",
"firstName": "Grigoriy",
"lastName": "Beziuk",
"lastWebsiteVisit": "2018-11-21 17:55:14",
"isSubscribed": false,
"domainMembershipMap": {}
},
"9h5": {
"id": "9h5",
"firstName": "Eyal",
"lastName": "Roth",
"lastWebsiteVisit": "2019-05-02 12:45:22",
"isSubscribed": false,
"domainMembershipMap": {}
}
},
"domains": {
"1": {
"id": "1",
"pageId": "1lw",
"createdAt": "2016-01-15 03:02:51",
"alias": "math",
"canUsersComment": false,
"canUsersProposeComment": true,
"canUsersProposeEdits": true,
"friendDomainIds": []
},
"2": {
"id": "2",
"pageId": "2v",
"createdAt": "2015-03-26 23:12:18",
"alias": "value_alignment",
"canUsersComment": false,
"canUsersProposeComment": true,
"canUsersProposeEdits": true,
"friendDomainIds": []
},
"3": {
"id": "3",
"pageId": "3d",
"createdAt": "2015-03-30 22:19:47",
"alias": "Arbital",
"canUsersComment": false,
"canUsersProposeComment": true,
"canUsersProposeEdits": true,
"friendDomainIds": []
},
"8": {
"id": "8",
"pageId": "198",
"createdAt": "2015-12-13 23:14:48",
"alias": "TeamArbital",
"canUsersComment": false,
"canUsersProposeComment": true,
"canUsersProposeEdits": true,
"friendDomainIds": []
},
"15": {
"id": "15",
"pageId": "58c",
"createdAt": "2016-07-08 18:23:14",
"alias": "DecisionTheory",
"canUsersComment": false,
"canUsersProposeComment": true,
"canUsersProposeEdits": true,
"friendDomainIds": []
},
"3017": {
"id": "3017",
"pageId": "8zq",
"createdAt": "2018-02-03 16:56:49",
"alias": "AdnllL",
"canUsersComment": false,
"canUsersProposeComment": true,
"canUsersProposeEdits": true,
"friendDomainIds": []
},
"3306": {
"id": "3306",
"pageId": "9h5",
"createdAt": "2019-03-18 12:58:43",
"alias": "EyalRoth",
"canUsersComment": false,
"canUsersProposeComment": true,
"canUsersProposeEdits": true,
"friendDomainIds": []
}
},
"masteries": {
"1x5": {
"pageId": "1x5",
"has": false,
"wants": false,
"level": 0,
"updatedAt": ""
},
"4vr": {
"pageId": "4vr",
"has": false,
"wants": false,
"level": 0,
"updatedAt": ""
},
"4xx": {
"pageId": "4xx",
"has": false,
"wants": false,
"level": 0,
"updatedAt": ""
},
"4zd": {
"pageId": "4zd",
"has": false,
"wants": false,
"level": 0,
"updatedAt": ""
}
},
"marks": {},
"pageObjects": {},
"result": {
"lensId": "4xx",
"primaryPageId": "4zd"
},
"globalData": {
"privateDomain": {
"id": "0",
"pageId": "",
"createdAt": "",
"alias": "",
"canUsersComment": false,
"canUsersProposeComment": false,
"canUsersProposeEdits": false,
"friendDomainIds": []
},
"improvementTagIds": [
"15r",
"15s",
"3rk",
"3wq",
"433",
"4d3",
"4lg",
"4pt",
"4w9",
"4ym",
"54j",
"55s",
"5cb",
"5gr",
"5sv",
"5t7",
"5tb",
"5v6",
"5xq",
"72"
]
}
}