Bayesian Knowledge Tracing
Last updated
probabilistic model
pL → latent (mastery)
pT → transition (learning)
pG → guess
pS → slip
Learning order (K)
Problem difficulty
Prior knowledge (initial assessment + sequential)
Learning rate/speed (derivatives / ODE or PDE)
#@title Initialize Parameters
import numpy as np
def initialize_parameters(pL=None, pT=None, pS=None, pG=None):
    """Initialize the four BKT parameters and pack them into a dict.

    Parameters
    ----------
    pL, pT, pS, pG : float or None
        Prior mastery, learning (transition), slip, and guess
        probabilities.  A value passed by the caller is used as-is;
        any parameter left as None is drawn uniformly from [0, 1)
        with a fixed seed for reproducibility.

    Returns
    -------
    dict with keys 'pL', 'pT', 'pS', 'pG'.

    Note: the original version discarded all arguments and used
    np.random.randn, whose Gaussian draws fall outside [0, 1] and are
    therefore invalid probabilities; np.random.rand fixes the range.
    """
    np.random.seed(1)  # fixed seed so random initialization is reproducible
    if pL is None:
        pL = float(np.random.rand())
    if pT is None:
        pT = float(np.random.rand())
    if pS is None:
        pS = float(np.random.rand())
    if pG is None:
        pG = float(np.random.rand())
    return {'pL': pL, 'pT': pT, 'pS': pS, 'pG': pG}
#@title Eq. b
# Two state (0 or 1)
# 0 when pL < 0.5; 1 when pL >= 0.5
def correct_latent(pL, pS, pG, T):
    """Eq. b — Bayes posterior of mastery given a CORRECT response.

        P(L | correct) = pL*(1-pS) / (pL*(1-pS) + (1-pL)*pG)

    Parameters
    ----------
    pL : float  prior mastery probability
    pS : float  slip probability
    pG : float  guess probability
    T  : int    number of time steps; posteriors for t = 0 .. T-2

    Returns
    -------
    dict mapping 'pL_correct_obs{t}' -> posterior probability.
    Empty dict when T < 2.

    Note: the original built this dict but returned only the last key
    STRING, discarding every computed value; it also used np.dot on
    scalars, which is just multiplication.
    """
    # pL is constant over the loop, so the posterior is loop-invariant.
    posterior = (pL * (1 - pS)) / (pL * (1 - pS) + (1 - pL) * pG)
    parameters = {}
    for t in range(T - 1):
        parameters['pL_correct_obs' + str(t)] = posterior
    return parameters
#@title Eq. c
# Two state (0 or 1)
# 0 when pL < 0.5; 1 when pL >= 0.5
def wrong_latent(pL, pS, pG, T):
    """Eq. c — Bayes posterior of mastery given a WRONG response.

        P(L | wrong) = pL*pS / (pL*pS + (1-pL)*(1-pG))

    Parameters
    ----------
    pL : float  prior mastery probability
    pS : float  slip probability
    pG : float  guess probability
    T  : int    number of time steps; posteriors for t = 0 .. T-2

    Returns
    -------
    dict mapping 'pL_wrong_obs{t}' -> posterior probability.
    Empty dict when T < 2.

    Note: the original built this dict but returned only the last key
    STRING, discarding every computed value; it also used np.dot on
    scalars, which is just multiplication.
    """
    # pL is constant over the loop, so the posterior is loop-invariant.
    posterior = (pL * pS) / (pL * pS + (1 - pL) * (1 - pG))
    parameters = {}
    for t in range(T - 1):
        parameters['pL_wrong_obs' + str(t)] = posterior
    return parameters
#@title Eq. d
def update_latent(pL_obs, pT, T, condition):
    """Eq. d — learning transition applied to each observation posterior.

        pL_{t+1} = P(L|obs)_t + (1 - P(L|obs)_t) * pT

    Parameters
    ----------
    pL_obs : dict
        Posteriors keyed 'pL_correct_obs{t}' or 'pL_wrong_obs{t}', i.e.
        the output of correct_latent() / wrong_latent().
    pT : float
        Transition (learning) probability.
    T : int
        Number of time steps; updates produced for t = 0 .. T-2.
    condition : str
        'correct' or 'wrong' — selects which key prefix to read.

    Returns
    -------
    dict mapping 'pL{t+1}' -> updated mastery probability.

    Raises
    ------
    ValueError if condition is not 'correct' or 'wrong'.

    Note: the original assigned the key STRING to pL and then performed
    arithmetic on it (a TypeError), never used the pL_obs argument, and
    returned only the last key string instead of the values.
    """
    if condition == 'correct':
        prefix = 'pL_correct_obs'
    elif condition == 'wrong':
        prefix = 'pL_wrong_obs'
    else:
        raise ValueError("condition must be 'correct' or 'wrong'")
    parameters = {}
    for t in range(T - 1):
        pL = pL_obs[prefix + str(t)]
        parameters['pL' + str(t + 1)] = pL + (1 - pL) * pT
    return parameters
#@title Eq. e
def observation(pL, pS, pG, T):
    """Eq. e — predicted probability of a correct response.

        P(correct) = pL*(1-pS) + (1-pL)*pG

    Parameters
    ----------
    pL : float  mastery probability
    pS : float  slip probability
    pG : float  guess probability
    T  : int    number of time steps; predictions for t = 1 .. T-1

    Returns
    -------
    dict mapping 'pC{t+1}' -> predicted correctness probability.
    Empty dict when T < 2.

    Note: the original built this dict but returned only the last key
    STRING, discarding the computed values; np.dot on scalars is plain
    multiplication.
    """
    # pL is constant here, so the prediction is the same at every step.
    pC = pL * (1 - pS) + (1 - pL) * pG
    return {'pC' + str(t + 1): pC for t in range(T - 1)}