## Linear discriminant analyses

Discriminant analysis is a classification method. It assumes that each class generates data from its own Gaussian distribution. To perform Linear Discriminant Analysis (LDA) in R we will make use of the `lda` function in the MASS package.

Data processing

library(MASS)
library(caret)
library(phonR)

# Load the data (one row per vowel token):
Arabic <- read.csv("SpeakingRate24.csv")

# Delete the column containing the Arabic-script phrase to avoid bugs
# (non-ASCII text) downstream.
Arabic <- subset(Arabic, select = -Arabic_phrase) 

# Delete stray spaces in the annotation columns.
Arabic$CVC <- gsub(' ', '', Arabic$CVC)
Arabic$V <- gsub(' ', '', Arabic$V)
Arabic$Length <- gsub(' ', '', Arabic$Length)

# Recode the '--undefined--' marker (presumably failed f0 tracking) as NA.
# NOTE(review): as.integer() truncates fractional Hz values — confirm the f0
# measurements are whole numbers, otherwise as.numeric() would be safer.
Arabic$f0ons <- as.integer(gsub('--undefined--', NA, Arabic$f0ons))
Arabic$f0mid <- as.integer(gsub('--undefined--', NA, Arabic$f0mid))
Arabic$f0off <- as.integer(gsub('--undefined--', NA, Arabic$f0off))

# Transform all remaining "character" columns into factors.
Arabic <- dplyr::mutate_if(Arabic, is.character, as.factor) 

# Display order of V: short vowels, then long vowels, then the mid vowels.
Arabic$V = factor(Arabic$V, levels = c('i','a','u','i:','a:','u:','e:','o:')) 
# Display order of C.
Arabic$C <- factor(Arabic$C, levels=c("b","d","g","t","k")) 
Arabic$VOTi <- as.integer(Arabic$VOTi)

# Lobanov (z-score) normalisation of the mid-vowel formants (phonR).
# NOTE(review): normLobanov() is applied over the whole data set, not per
# speaker — confirm this pooling is intended.
Arabic$F1midnor <- with(Arabic, normLobanov(F1mid)) # F1mid normalisation
Arabic$F2midnor <- with(Arabic, normLobanov(F2mid)) # F2mid normalisation

LDA of all vowels with and without temporal information:

# LDA on all eight vowels using the normalised mid-vowel formants only.
# CV=TRUE requests leave-one-out cross-validation, so model$class holds the
# cross-validated predicted class for every token.
model = lda(V ~ F1midnor+F2midnor, na.action = na.omit, data=Arabic,CV=TRUE)
prev = model$class
# total percent correct
# NOTE(review): if any predictor contains NA, na.omit makes prev shorter than
# nrow(Arabic) and this comparison misaligns — confirm the predictors are
# complete.
print(sum(prev==Arabic$V)/nrow(Arabic))
## [1] 0.5764583
# percent correct for each category of V (caret confusion matrix)
selectTabs <-table(prev,Arabic$V)
confusionMatrix(selectTabs)
## Confusion Matrix and Statistics
## 
##     
## prev    i    a    u   i:   a:   u:   e:   o:
##   i   558   89   32  129    3    1  368    0
##   a    72  532  155    1  212    6  159   93
##   u    71   65  416    2   11  222   10  136
##   i:  199   22    0 1048    0    0  122    0
##   a:    0  254    4    0  899    0    1   65
##   u:    0    0  365    0    0  917    0  280
##   e:  300  126    5   20    6    0  540    2
##   o:    0  112  223    0   69   54    0  624
## 
## Overall Statistics
##                                           
##                Accuracy : 0.5765          
##                  95% CI : (0.5665, 0.5864)
##     No Information Rate : 0.125           
##     P-Value [Acc > NIR] : < 2.2e-16       
##                                           
##                   Kappa : 0.516           
##                                           
##  Mcnemar's Test P-Value : NA              
## 
## Statistics by Class:
## 
##                      Class: i Class: a Class: u Class: i: Class: a: Class: u:
## Sensitivity           0.46500  0.44333  0.34667    0.8733   0.74917   0.76417
## Specificity           0.92595  0.91690  0.93845    0.9592   0.96143   0.92321
## Pos Pred Value        0.47288  0.43252  0.44587    0.7534   0.73508   0.58707
## Neg Pred Value        0.92375  0.92019  0.90954    0.9815   0.96407   0.96479
## Prevalence            0.12500  0.12500  0.12500    0.1250   0.12500   0.12500
## Detection Rate        0.05813  0.05542  0.04333    0.1092   0.09365   0.09552
## Detection Prevalence  0.12292  0.12812  0.09719    0.1449   0.12740   0.16271
## Balanced Accuracy     0.69548  0.68012  0.64256    0.9163   0.85530   0.84369
##                      Class: e: Class: o:
## Sensitivity            0.45000    0.5200
## Specificity            0.94536    0.9455
## Pos Pred Value         0.54054    0.5767
## Neg Pred Value         0.92326    0.9324
## Prevalence             0.12500    0.1250
## Detection Rate         0.05625    0.0650
## Detection Prevalence   0.10406    0.1127
## Balanced Accuracy      0.69768    0.7327
###

# Same leave-one-out LDA with raw vowel Duration added as a predictor:
# accuracy rises from ~0.58 (spectral only) to ~0.71.
model = lda(V ~ F1midnor+F2midnor+Duration, na.action = na.omit, data=Arabic,CV=TRUE)
prev = model$class
# total percent correct
print(sum(prev==Arabic$V)/nrow(Arabic))
## [1] 0.7135417
# percent correct for each category of V
selectTabs <-table(prev,Arabic$V)
confusionMatrix(selectTabs)
## Confusion Matrix and Statistics
## 
##     
## prev    i    a    u   i:   a:   u:   e:   o:
##   i   928  184   34  134    2    1  191    1
##   a    96  750  153    1  133    7   62  105
##   u    68   95  891    2    1  253    3  120
##   i:   62    8    0 1039    0    0  128    0
##   a:    0  110    1    0  940    0   16   74
##   u:    0    0   82    0    0  867    0  257
##   e:   46   14    0   24   26    0  793    1
##   o:    0   39   39    0   98   72    7  642
## 
## Overall Statistics
##                                           
##                Accuracy : 0.7135          
##                  95% CI : (0.7044, 0.7226)
##     No Information Rate : 0.125           
##     P-Value [Acc > NIR] : < 2.2e-16       
##                                           
##                   Kappa : 0.6726          
##                                           
##  Mcnemar's Test P-Value : NA              
## 
## Statistics by Class:
## 
##                      Class: i Class: a Class: u Class: i: Class: a: Class: u:
## Sensitivity           0.77333  0.62500  0.74250    0.8658   0.78333   0.72250
## Specificity           0.93488  0.93369  0.93548    0.9764   0.97607   0.95964
## Pos Pred Value        0.62915  0.57383  0.62177    0.8399   0.82384   0.71891
## Neg Pred Value        0.96652  0.94574  0.96216    0.9807   0.96926   0.96033
## Prevalence            0.12500  0.12500  0.12500    0.1250   0.12500   0.12500
## Detection Rate        0.09667  0.07812  0.09281    0.1082   0.09792   0.09031
## Detection Prevalence  0.15365  0.13615  0.14927    0.1289   0.11885   0.12562
## Balanced Accuracy     0.85411  0.77935  0.83899    0.9211   0.87970   0.84107
##                      Class: e: Class: o:
## Sensitivity            0.66083   0.53500
## Specificity            0.98679   0.96964
## Pos Pred Value         0.87721   0.71572
## Neg Pred Value         0.95320   0.93588
## Prevalence             0.12500   0.12500
## Detection Rate         0.08260   0.06688
## Detection Prevalence   0.09417   0.09344
## Balanced Accuracy      0.82381   0.75232
####

# Same leave-one-out LDA with Word_V_ratio (presumably the vowel-to-word
# duration ratio, i.e. rate-normalised duration — confirm) instead of raw
# Duration: accuracy ~0.70, close to the raw-duration model.
model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, na.action = na.omit, data=Arabic,CV=TRUE)
prev = model$class
# total percent correct
print(sum(prev==Arabic$V)/nrow(Arabic))
## [1] 0.7007292
# percent correct for each category of V
selectTabs <-table(prev,Arabic$V)
confusionMatrix(selectTabs)
## Confusion Matrix and Statistics
## 
##     
## prev    i    a    u   i:   a:   u:   e:   o:
##   i   828  153   23   59    2    1   83    1
##   a    61  610  108    1   66    2   41   68
##   u    38   67  688    2    0  153    0   62
##   i:   45    3    0 1068    0    0  142    0
##   a:    3  184    6    0  995    0    9   73
##   u:    0    1  207    0    1  933    0  309
##   e:  221   70   23   70   36    6  921    3
##   o:    4  112  145    0  100  105    4  684
## 
## Overall Statistics
##                                           
##                Accuracy : 0.7007          
##                  95% CI : (0.6915, 0.7099)
##     No Information Rate : 0.125           
##     P-Value [Acc > NIR] : < 2.2e-16       
##                                           
##                   Kappa : 0.658           
##                                           
##  Mcnemar's Test P-Value : NA              
## 
## Statistics by Class:
## 
##                      Class: i Class: a Class: u Class: i: Class: a: Class: u:
## Sensitivity           0.69000  0.50833  0.57333    0.8900    0.8292   0.77750
## Specificity           0.96167  0.95869  0.96167    0.9774    0.9673   0.93833
## Pos Pred Value        0.72000  0.63741  0.68119    0.8490    0.7835   0.64300
## Neg Pred Value        0.95598  0.93174  0.94040    0.9842    0.9754   0.96724
## Prevalence            0.12500  0.12500  0.12500    0.1250    0.1250   0.12500
## Detection Rate        0.08625  0.06354  0.07167    0.1113    0.1036   0.09719
## Detection Prevalence  0.11979  0.09969  0.10521    0.1310    0.1323   0.15115
## Balanced Accuracy     0.82583  0.73351  0.76750    0.9337    0.8982   0.85792
##                      Class: e: Class: o:
## Sensitivity            0.76750   0.57000
## Specificity            0.94893   0.94405
## Pos Pred Value         0.68222   0.59272
## Neg Pred Value         0.96618   0.93891
## Prevalence             0.12500   0.12500
## Detection Rate         0.09594   0.07125
## Detection Prevalence   0.14062   0.12021
## Balanced Accuracy      0.85821   0.75702

Dividing the data into subsets to study each timbre at each speaking rate:

# Preparing vowel pairs: one subset per timbre (short/long pair) and one per
# mid-vowel contrast. Unused factor levels are dropped after subsetting so
# that lda() and table() see only the two vowels of the pair.
subset_pair <- function(df, keep) {
  # Keep the selected rows, then drop empty levels of Timbre and V.
  pair <- df[keep, ]
  pair$Timbre <- droplevels(pair$Timbre)
  pair$V <- droplevels(pair$V)
  pair
}

Arabica <- subset_pair(Arabic, Arabic$Timbre == 'a')
Arabici <- subset_pair(Arabic, Arabic$Timbre == 'i')
Arabicu <- subset_pair(Arabic, Arabic$Timbre == 'u')
Arabice <- subset_pair(Arabic, Arabic$V == 'e:' | Arabic$V == 'i')
Arabico <- subset_pair(Arabic, Arabic$V == 'o:' | Arabic$V == 'u')

LDA pooled across all rates, for each vowel pair:

# Leave-one-out LDA for each vowel pair, pooling all three speaking rates.
# Each pair is fitted three times: spectral predictors only, spectral +
# Duration, and spectral + Word_V_ratio.

#a-a:

model = lda(V ~ F1midnor+F2midnor, data=Arabica,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabica$V)/nrow(Arabica))
## [1] 0.7683333
#a-a: + duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabica,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabica$V)/nrow(Arabica))
## [1] 0.9091667
#a-a: + word-v-ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabica,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabica$V)/nrow(Arabica))
## [1] 0.8775
#i-i:

model = lda(V ~ F1midnor+F2midnor, data=Arabici,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabici$V)/nrow(Arabici))
## [1] 0.8604167
#i-i: + duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabici,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabici$V)/nrow(Arabici))
## [1] 0.9233333
#i-i: + word-v-ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabici,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabici$V)/nrow(Arabici))
## [1] 0.9354167
#u-u:

model = lda(V ~ F1midnor+F2midnor, data=Arabicu,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabicu$V)/nrow(Arabicu))
## [1] 0.72
#u-u: + duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabicu,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabicu$V)/nrow(Arabicu))
## [1] 0.8570833
#u-u: + word-v-ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabicu,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabicu$V)/nrow(Arabicu))
## [1] 0.80875
#i-e:

model = lda(V ~ F1midnor+F2midnor, data=Arabice,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabice$V)/nrow(Arabice))
## [1] 0.6279167
#i-e: + duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabice,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabice$V)/nrow(Arabice))
## [1] 0.8766667
#i-e: + word-v-ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabice,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabice$V)/nrow(Arabice))
## [1] 0.8491667
#u-o:

model = lda(V ~ F1midnor+F2midnor, data=Arabico,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabico$V)/nrow(Arabico))
## [1] 0.73875
#u-o: + duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabico,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabico$V)/nrow(Arabico))
## [1] 0.88375
#u-o: + word-v-ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabico,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabico$V)/nrow(Arabico))
## [1] 0.82875

LDA at each rate separately, for each vowel pair:

# a-a: pair, fitted within each speaking rate separately.
# Arabic_a is reassigned to a fresh rate subset before each trio of models.
#a-a: slow
Arabic_a=Arabica[Arabica$Rate=='slow',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.78125
#a-a: slow +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.9775
#a-a: slow +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.8925
#a-a: normal
Arabic_a=Arabica[Arabica$Rate=='normal',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.755
#a-a: normal +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.9625
#a-a: normal +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.85875
#a-a: fast
Arabic_a=Arabica[Arabica$Rate=='fast',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.7725
#a-a: fast +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.94625
#a-a: fast +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.8775
######

# i-i: pair, fitted within each speaking rate separately.
#i-i: slow
Arabic_i=Arabici[Arabici$Rate=='slow',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.9175
#i-i: slow +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.97125
#i-i: slow +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.96625
#i-i: normal
Arabic_i=Arabici[Arabici$Rate=='normal',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.8775
#i-i: normal +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.96375
#i-i: normal +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.9375
#i-i: fast
Arabic_i=Arabici[Arabici$Rate=='fast',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.81625
#i-i: fast +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.9025
#i-i: fast +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.9025
##########################

# u-u: pair, fitted within each speaking rate separately.
#u-u: slow
Arabic_u=Arabicu[Arabicu$Rate=='slow',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.77875
#u-u: slow +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.955
#u-u: slow +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.88625
#u-u: normal
Arabic_u=Arabicu[Arabicu$Rate=='normal',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.74375
#u-u: normal +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.9175
#u-u: normal +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.805
#u-u: fast
Arabic_u=Arabicu[Arabicu$Rate=='fast',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.655
#u-u: fast +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.85125
#u-u: fast +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.745
################

# i-e: pair, fitted within each speaking rate separately.
#i-e: slow
Arabic_e=Arabice[Arabice$Rate=='slow',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.59625
#i-e: slow +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.95
#i-e: slow +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.89125
#i-e: normal
Arabic_e=Arabice[Arabice$Rate=='normal',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.6325
#i-e: normal +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.93125
#i-e: normal +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.845
#i-e: fast
Arabic_e=Arabice[Arabice$Rate=='fast',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.6475
#i-e: fast +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.91125
#i-e: fast +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.81875
######################

# u-o: pair, fitted within each speaking rate separately.
#u-o: slow
Arabic_o=Arabico[Arabico$Rate=='slow',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.76125
#u-o: slow +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.94625
#u-o: slow +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.865
#u-o: normal
Arabic_o=Arabico[Arabico$Rate=='normal',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.75375
#u-o: normal +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.9225
#u-o: normal +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.84
#u-o: fast
Arabic_o=Arabico[Arabico$Rate=='fast',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.7325
#u-o: fast +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.8875
#u-o: fast +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.78875
#### Extreme situation: cross-rate pairing — the long vowel produced at fast
#### rate vs the short vowel at slow rate, where durations should overlap
#### the most.

## a (slow) vs a: (fast)
# & binds tighter than |, so this keeps (fast & a:) rows and (slow & a) rows.
Arabic_a=Arabica[Arabica$Rate=='fast' & Arabica$V=='a:' | Arabica$Rate=='slow' & Arabica$V=='a',]


model = lda(V ~ F1midnor+F2midnor, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.72875
#a-slow-a:-fast  +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.79875
#a-slow-a:-fast  +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_a,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_a$V)/nrow(Arabic_a))
## [1] 0.8675
#####################

## i (slow) vs i: (fast) — cross-rate pairing.

Arabic_i=Arabici[Arabici$Rate=='fast' & Arabici$V=='i:' | Arabici$Rate=='slow' & Arabici$V=='i',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.78375
#i-slow-i:-fast  +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.81
#i-slow-i:-fast  +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_i,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_i$V)/nrow(Arabic_i))
## [1] 0.8875
###################
#u (slow) vs u: (fast) — cross-rate pairing.

Arabic_u=Arabicu[Arabicu$Rate=='fast' & Arabicu$V=='u:' | Arabicu$Rate=='slow' & Arabicu$V=='u',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.59625
#u-slow-u:-fast  +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.66375
#u-slow-u:-fast  +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_u,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_u$V)/nrow(Arabic_u))
## [1] 0.73875
###########################

#i (slow) vs e: (fast) — cross-rate pairing.

Arabic_e=Arabice[Arabice$Rate=='fast' & Arabice$V=='e:' | Arabice$Rate=='slow' & Arabice$V=='i',]
model = lda(V ~ F1midnor+F2midnor, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.7175
#i-slow-e:-fast  +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.77625
#i-slow-e:-fast  +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_e,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_e$V)/nrow(Arabic_e))
## [1] 0.8225
###########################

#u (slow) vs o: (fast) — cross-rate pairing.
Arabic_o=Arabico[Arabico$Rate=='fast' & Arabico$V=='o:' | Arabico$Rate=='slow' & Arabico$V=='u',]

model = lda(V ~ F1midnor+F2midnor, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.69625
#u-slow-o:-fast  +duration

model = lda(V ~ F1midnor+F2midnor+Duration, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.74375
#u-slow-o:-fast  +Word_V_ratio

model = lda(V ~ F1midnor+F2midnor+Word_V_ratio, data=Arabic_o,na.action = na.omit, CV=TRUE)
prev = model$class
print(sum(prev==Arabic_o$V)/nrow(Arabic_o))
## [1] 0.7975