#-------------------------------------------------------------------------
library(ndl)
source("learnWeights.R") # creates learnWeights.fnc
dat = read.table("dataDan.txt", header=TRUE, stringsAsFactors=FALSE)
dat$Cues = orthoCoding(dat$String, grams=2)
dat$Frequency=1
dat$Outcomes = dat$Type
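# quick illustrative check (optional): inspect the letter-bigram cues that
# orthoCoding() generated for the first few items
head(dat[, c("String", "Cues", "Outcomes")])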
# first, use the Danks equilibrium equations
# (the order of the learning events does not matter here)
wD = estimateWeights(dat, alpha=0.1, beta=0.1)
aD = estimateActivations(dat, wD)$activationMatrix
aD = aD[,order(colnames(aD))]
dat$ChoiceD = apply(aD, 1, FUN=function(v) {
  # after sorting the columns, v[1] is the "nonword" activation and v[2] the "word" activation
  if (v[1] >= v[2]) {
    return("nonword")
  } else {
    return("word")
  }
})
table(dat$Type, dat$ChoiceD)
#
#            nonword  word
#   nonword     7832     0
#   word         116   191
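# A minimal illustrative sketch of the idea behind the Danks equilibrium
# weights (NOT the actual estimateWeights() implementation, which handles
# normalization and duplicates more carefully): at equilibrium the expected
# weight change is zero, so the weights solve
#   sum_j Pr(c_j | c_i) * w[j, o] = Pr(o | c_i)
# for every cue c_i and outcome o; the learning-rate parameters alpha and
# beta drop out of these equations.
danksSketch.fnc = function(dat) {
  cues = unique(unlist(strsplit(dat$Cues, "_")))
  outs = unique(unlist(strsplit(dat$Outcomes, "_")))
  Ccc = matrix(0, length(cues), length(cues), dimnames=list(cues, cues))
  Cco = matrix(0, length(cues), length(outs), dimnames=list(cues, outs))
  for (k in 1:nrow(dat)) {
    cs = unique(strsplit(dat$Cues[k], "_")[[1]])
    os = unique(strsplit(dat$Outcomes[k], "_")[[1]])
    Ccc[cs, cs] = Ccc[cs, cs] + dat$Frequency[k] # cue-cue co-occurrence counts
    Cco[cs, os] = Cco[cs, os] + dat$Frequency[k] # cue-outcome co-occurrence counts
  }
  condCues = Ccc / diag(Ccc) # row i: Pr(c_j | c_i)
  condOuts = Cco / diag(Ccc) # row i: Pr(o   | c_i)
  W = MASS::ginv(condCues) %*% condOuts # generalized inverse for singular systems
  dimnames(W) = list(cues, outs)
  W
}
# usage: wD.sketch = danksSketch.fnc(dat)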
# second, apply the Rescorla-Wagner (RW) equations learning event by learning event
# now the order of the learning events becomes important, so reshuffle them
set.seed(314)
dat = dat[sample(1:nrow(dat)),]
cues = unique(unlist(strsplit(dat$Cues, "_")))
outcomes = unique(unlist(strsplit(dat$Outcomes, "_")))
wRW = learnWeights.fnc(dat, cues, outcomes, alpha=0.1, beta=0.1)
aRW = estimateActivations(dat, wRW)$activationMatrix
aRW = aRW[,order(colnames(aRW))]
dat$ChoiceRW = apply(aRW, 1, FUN=function(v) {
  if (v[1] >= v[2]) {
    return("nonword")
  } else {
    return("word")
  }
})
table(dat$Type, dat$ChoiceRW)
#
#            nonword  word
#   nonword     7832     0
#   word         296    11
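# For reference, a minimal sketch of the trial-by-trial Rescorla-Wagner
# update that a function like learnWeights.fnc is presumably implementing
# (learnWeights.R is not included here, so this is an assumption, not its
# actual code): on every learning event the weights of the cues that are
# present move towards lambda for outcomes that occur and towards 0 for
# outcomes that do not, by a step of alpha*beta times the prediction error.
rwSketch.fnc = function(dat, cueset, outcomeset, alpha=0.1, beta=0.1, lambda=1) {
  W = matrix(0, length(cueset), length(outcomeset),
      dimnames=list(cueset, outcomeset))
  for (k in 1:nrow(dat)) {
    cs = unique(strsplit(dat$Cues[k], "_")[[1]])     # cues present on this trial
    os = unique(strsplit(dat$Outcomes[k], "_")[[1]]) # outcomes present on this trial
    for (o in outcomeset) {
      target = if (o %in% os) lambda else 0
      W[cs, o] = W[cs, o] + alpha * beta * (target - sum(W[cs, o]))
    }
  }
  W
}
# usage sketch: wRW2 = rwSketch.fnc(dat, cues, outcomes, alpha=0.1, beta=0.1)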
#-------------------------------------------------------------------------
# Let's create a fictive first block of 100 learning trials.
#-------------------------------------------------------------------------
source("learnWeights.R") # creates learnWeights.fnc
dat = read.table("dataDan.txt", header=TRUE, stringsAsFactors=FALSE)
dat$Cues = orthoCoding(dat$String, grams=2)
dat$Frequency=1
dat$Outcomes = dat$Type
set.seed(314)
words = dat[dat$Type=="word", ]
kNumWords = dim(words)[1]
nonwords = dat[dat$Type=="nonword", ]
kNumNonwords = dim(nonwords)[1]
words1 = words[sample(1:nrow(words), 50), ]
nonwords1 = nonwords[sample(1:nrow(nonwords),50), ]
block1 = rbind(words1,nonwords1)
block1 = block1[sample(1:nrow(block1)), ]
cues = unique(unlist(strsplit(block1$Cues, "_")))
outcomes = unique(unlist(strsplit(block1$Outcomes, "_")))
block1.w = learnWeights.fnc(datset=block1, cueset=cues, outcomeset=outcomes,
    alpha=sqrt(0.001), beta=sqrt(0.001), lambda=1.0) # effective RW learning rate alpha*beta = 0.001
block1.a = estimateActivations(block1, block1.w)$activationMatrix
block1.a = block1.a[ ,order(colnames(block1.a))]
block1$Choice = apply(block1.a, 1, FUN=function(v) {
  if (v[1] >= v[2]) {
    return("nonword")
  } else {
    return("word")
  }
})
table(block1$Type, block1$Choice)
#
#            nonword  word
#   nonword       38    12
#   word           1    49
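# sanity check: overall proportion of correct lexicality decisions in this
# first block (just another summary of the table above)
mean(block1$Type == block1$Choice)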
# collect the row names of the words that were correctly recognized in block 1
learned.words = character(kNumWords)
counter = 1
for (i in 1:nrow(block1)) {
  if (block1[i, ]$Type == "word" & block1[i, ]$Choice == "word") {
    learned.words[counter] = rownames(block1)[i]
    counter = counter + 1
  }
}
learned.words = learned.words[learned.words != ""] # drop the unused slots
learned.words
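# the same set of row names can be obtained without the explicit loop
learned.words2 = rownames(block1)[block1$Type == "word" & block1$Choice == "word"]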
# some aspects of the experiment as described in the paper
# kNumBlocks = 30
kNumTrialsPerBlock = 100 # or 2000? unclear from the paper
kNumBlocks = 2000
for (i in 1:1) { # placeholder: for now, run just a single block
  set.seed(314) # note: reseeding inside the loop would make every block identical
  current.words = words[sample(1:nrow(words), 50), ] # sampling scheme to be changed
  current.nonwords = nonwords[sample(1:nrow(nonwords), 50), ]
  block.words = rbind(current.words, current.nonwords)
  block.words = block.words[sample(1:nrow(block.words)), ]
  cues = unique(unlist(strsplit(block.words$Cues, "_")))
  outcomes = unique(unlist(strsplit(block.words$Outcomes, "_")))
  block.words.w = learnWeights.fnc(datset=block.words, cueset=cues, outcomeset=outcomes,
      alpha=sqrt(0.001), beta=sqrt(0.001), lambda=1.0)
  block.words.a = estimateActivations(block.words, block.words.w)$activationMatrix
  block.words.a = block.words.a[, order(colnames(block.words.a))]
  block.words$Choice = apply(block.words.a, 1, FUN=function(v) {
    if (v[1] >= v[2]) {
      return("nonword")
    } else {
      return("word")
    }
  })
  print(table(block.words$Type, block.words$Choice))
}
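# A hedged sketch of how the placeholder loop above might be extended to the
# full experiment: run kNumBlocks blocks, resample the 50 words and 50
# nonwords for every block, and accumulate the row names of the words that
# were recognized correctly at least once.  This only reuses the functions
# already called above; details such as whether the weights carry over from
# block to block are not settled here and would have to follow the paper.
# Variable names (all.learned, block, w, a, b) are just for this sketch.
set.seed(314)
all.learned = character(0)
for (b in 1:kNumBlocks) {
  current.words = words[sample(1:nrow(words), 50), ]
  current.nonwords = nonwords[sample(1:nrow(nonwords), 50), ]
  block = rbind(current.words, current.nonwords)
  block = block[sample(1:nrow(block)), ]
  cues = unique(unlist(strsplit(block$Cues, "_")))
  outcomes = unique(unlist(strsplit(block$Outcomes, "_")))
  w = learnWeights.fnc(datset=block, cueset=cues, outcomeset=outcomes,
      alpha=sqrt(0.001), beta=sqrt(0.001), lambda=1.0)
  a = estimateActivations(block, w)$activationMatrix
  a = a[, order(colnames(a))]
  block$Choice = apply(a, 1, function(v) if (v[1] >= v[2]) "nonword" else "word")
  all.learned = union(all.learned,
      rownames(block)[block$Type == "word" & block$Choice == "word"])
}
length(all.learned) # number of distinct words recognized at least once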