Bayesian Knowledge Tracing
Bayesian Knowledge Tracing (BKT) is a probabilistic model of a learner's hidden mastery of a skill, updated after every observed answer. It rests on four probabilities:
- pL - latent (mastery): the probability the learner already knows the skill
- pT - transition (learning): the probability of learning the skill at each practice opportunity
- pG - guess: the probability of a correct answer when the skill is not known
- pS - slip: the probability of a wrong answer when the skill is known
Beyond these four parameters, factors a fuller model would account for:
- Learning order (K)
- Problem difficulty
- Prior knowledge (initial assessment + sequential)
- Learning rate/speed (derivatives / ODE or PDE)
Equation (a): $p(L_1)_u^k = p(L_0)^k$

Here $u$ indexes the learner and $k$ the skill: each learner's initial mastery estimate for a skill is the skill-level prior.
#@title Initialize Parameters
import numpy as np

def initialize_parameters(seed=1):
    # The four BKT quantities are probabilities, so draw them uniformly
    # from [0, 1) rather than from a standard normal (randn can return
    # negative values, which are invalid here).
    np.random.seed(seed)
    pL = np.random.rand(1)  # p(L0): prior probability of mastery
    pT = np.random.rand(1)  # p(T): probability of learning per opportunity
    pS = np.random.rand(1)  # p(S): probability of a slip
    pG = np.random.rand(1)  # p(G): probability of a guess
    parameters = {
        'pL': pL,
        'pT': pT,
        'pS': pS,
        'pG': pG
    }
    return parameters
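A quick smoke test of the initializer (a sketch; the seeded values are arbitrary draws, not fitted parameters):

    params = initialize_parameters(seed=1)
    for name, value in params.items():
        print(f'{name}: {value.item():.3f}')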
Equation (b): $p(L_t \mid obs = correct)_u^k = \dfrac{p(L_t)_u^k \, (1 - p(S)^k)}{p(L_t)_u^k \, (1 - p(S)^k) + (1 - p(L_t)_u^k) \, p(G)^k}$
#@title Eq. b
# Latent state is binary: treat pL < 0.5 as unmastered (0), pL >= 0.5 as mastered (1).
def correct_latent(pL, pS, pG, T):
    parameters = {}
    for t in range(T - 1):
        # Eq. (b): posterior mastery after observing a correct answer.
        # Plain scalar arithmetic replaces np.dot, which is meant for vectors.
        parameters['pL_correct_obs' + str(t)] = (pL * (1 - pS)) / (pL * (1 - pS) + (1 - pL) * pG)
    return parameters  # return the dict itself, not a key name
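A hypothetical call to check Eq. (b); the values pL=0.4, pS=0.1, pG=0.2 are made up for illustration. A correct answer should raise the mastery estimate:

    posteriors = correct_latent(pL=0.4, pS=0.1, pG=0.2, T=3)
    print(posteriors)
    # Each posterior is approximately 0.75: the correct answer raised pL from 0.4.
    # Both entries match because pL is held fixed inside the loop;
    # Eq. (d) is what advances it between opportunities.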
Equation (c): $p(L_t \mid obs = wrong)_u^k = \dfrac{p(L_t)_u^k \, p(S)^k}{p(L_t)_u^k \, p(S)^k + (1 - p(L_t)_u^k) \, (1 - p(G)^k)}$
#@title Eq. c
# Latent state is binary: 0 when pL < 0.5, 1 when pL >= 0.5.
def wrong_latent(pL, pS, pG, T):
    parameters = {}
    for t in range(T - 1):
        # Eq. (c): posterior mastery after observing a wrong answer.
        parameters['pL_wrong_obs' + str(t)] = (pL * pS) / (pL * pS + (1 - pL) * (1 - pG))
    return parameters
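The mirror check for Eq. (c) with the same illustrative numbers; a wrong answer should pull the estimate down:

    posteriors = wrong_latent(pL=0.4, pS=0.1, pG=0.2, T=2)
    print(posteriors)
    # Posterior is approximately 0.077: the estimate drops sharply because
    # a slip (pS=0.1) is far less likely than a miss without mastery.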
Equation (d): $p(L_{t+1})_u^k = p(L_t \mid obs)_u^k + (1 - p(L_t \mid obs)_u^k) \, p(T)^k$
#@title Eq. d
def update_latent(pL_obs, pT, T, condition):
    # pL_obs is the dict returned by correct_latent or wrong_latent.
    parameters = {}
    if condition == 'correct':
        key = 'pL_correct_obs'
    elif condition == 'wrong':
        key = 'pL_wrong_obs'
    for t in range(T - 1):
        pL = pL_obs[key + str(t)]  # look up the posterior value, not the key string
        # Eq. (d): chance to learn on this opportunity via p(T).
        parameters['pL' + str(t + 1)] = pL + (1 - pL) * pT
    return parameters
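Chaining Eq. (b) into Eq. (d), a sketch using the two functions above with the same illustrative numbers:

    obs_post = correct_latent(pL=0.4, pS=0.1, pG=0.2, T=2)
    updated = update_latent(pL_obs=obs_post, pT=0.3, T=2, condition='correct')
    print(updated)
    # {'pL1': ...}: a 0.75 posterior plus a 0.3 chance of learning gives about 0.825.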
Equation (e): $p(C_{t+1})_u^k = p(L_{t+1})_u^k \, (1 - p(S)^k) + (1 - p(L_{t+1})_u^k) \, p(G)^k$
#@title Eq. e
def observation(pL, pS, pG, T):
    parameters = {}
    for t in range(T - 1):
        # Eq. (e): predicted probability of a correct response at step t+1,
        # marginalizing over slip and guess.
        parameters['pC' + str(t + 1)] = pL * (1 - pS) + (1 - pL) * pG
    return parameters
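Stringing Eqs. (b)-(e) together gives the full per-step update. Below is a minimal sketch; the bkt_sequence helper, its parameter values, and the observation list are all illustrative, not from the notebook:

    def bkt_sequence(obs, pL0, pT, pS, pG):
        # One forward pass of BKT over a response sequence (1 = correct, 0 = wrong).
        pL = pL0
        history = []
        for o in obs:
            if o == 1:
                # Eq. (b): posterior after a correct response
                pL = pL * (1 - pS) / (pL * (1 - pS) + (1 - pL) * pG)
            else:
                # Eq. (c): posterior after a wrong response
                pL = pL * pS / (pL * pS + (1 - pL) * (1 - pG))
            pL = pL + (1 - pL) * pT             # Eq. (d): learning transition
            pC = pL * (1 - pS) + (1 - pL) * pG  # Eq. (e): next-step prediction
            history.append((pL, pC))
        return history

    for pL, pC in bkt_sequence(obs=[1, 1, 0, 1], pL0=0.4, pT=0.3, pS=0.1, pG=0.2):
        print(f'p(L) = {pL:.3f}   p(C next) = {pC:.3f}')

Each correct answer pushes p(L) up, each wrong answer pulls it down, and the transition term p(T) gives a steady drift toward mastery either way.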