]> git.donarmstrong.com Git - don_microarray.git/blobdiff - R/ma_loess_norm.R
add functions
[don_microarray.git] / R / ma_loess_norm.R
diff --git a/R/ma_loess_norm.R b/R/ma_loess_norm.R
new file mode 100644 (file)
index 0000000..250fb81
--- /dev/null
@@ -0,0 +1,86 @@
+library(stats)
+
+
+# the appropriate method is to average over the \hat{M}'s to find the
+# average fit, and then use that to correct the data.
+
+# ma.loessnorm <- function(array,iterations=2){
+#   n <- ncol(array)
+#   for (q in 1:iterations){
+#     bar.m.hat.array <- array(0,dim=dim(array))
+#     for (i in 1:(n-1)){
+#       for (j in (i+1):n){
+#         print(paste(i,j))
+#         # fit lowess
+#         # [we have to use loess so we can predict using predict.loess]
+#         mva <- cbind(log2(array[,i]/array[,j]),0.5*log2(array[,i]*array[,j]))
+#         colnames(mva) <- c("m","a")
+#         mva <- data.frame(na.omit(mva))
+#         temp.loess <- loess(mva[,"m"]~mva[,"a"],control = loess.control(surface = "direct"))
+#         # predict for i, predict for j (\hat{M})
+#         # subtract out \hat{M} from M
+#         m.hat <- predict(temp.loess,mva[,"a"])
+#         # weight correction by the inverse of 2(n-1) . [so that there will be n adjustments]
+#         bar.m.hat.array[,i] <- bar.m.hat.array[,i]+m.hat/(n-1)
+#         bar.m.hat.array[,j] <- bar.m.hat.array[,j]+m.hat/(n-1)
+#         print(paste(i,j))
+#       }
+#     }
+#     # correct arrays
+#     result.array <- array(0,dim=dim(array))
+#     i <- 1
+#     for (j in (i+1):n){
+#       mva <- cbind(log2(array[,i]/array[,j]),0.5*log2(array[,i]*array[,j]))
+#       colnames(mva) <- c("m","a")
+#       if (j == 2){
+#         # correct Mi the first time through
+#         result.array[,i] <- 2^(mva[,"a"]+0.5*(mva[,"m"]-bar.m.hat.array[,i]))
+#       }
+#       result.array[,j] <- 2^(mva[,"a"]-0.5*(mva[,"m"]-bar.m.hat.array[,i]))
+#     }
+#     rownames(result.array) <- rownames(array)
+#     colnames(result.array) <- colnames(array)
+#     array <- result.array
+#   }
+#   return(array)
+# }
+
+
# Normalize a matrix of positive intensities by pairwise loess regression
# on MA plots: for every pair of columns (i, j), fit M = log2(x_i/x_j)
# against A = 0.5*log2(x_i*x_j), subtract the fitted trend from M, and
# back-transform to intensities.  Each column's corrected values are
# averaged over its n-1 pairings, and the whole procedure is repeated
# `iterations` times.
#
# array          numeric matrix (rows = probes, columns = arrays); rows
#                containing any NA are dropped up front.
# iterations     number of normalization passes; 0 returns the
#                NA-stripped input unchanged.
# small.positive value substituted for entries <= 0 before taking logs.
# print.status   if TRUE, print the pass number as each pass starts.
#
# Returns a matrix of the same shape (after NA-row removal) carrying the
# input's dimnames.
ma.loessnorm2 <- function(array, iterations = 2, small.positive = 0.1,
                          print.status = FALSE) {
  n <- ncol(array)
  if (is.null(n) || n < 2) {
    stop("'array' must be a matrix with at least two columns")
  }
  # loess cannot handle missing values; drop incomplete rows once.
  array <- na.omit(array)
  # seq_len(): iterations = 0 performs no passes (1:0 would run two).
  for (q in seq_len(iterations)) {
    if (print.status) {
      print(q)
    }
    # log2 is undefined at <= 0, so clamp before taking logs.  Once per
    # pass suffices: clamping is idempotent, and after the first pass all
    # entries are positive anyway (they come from 2^x).
    array[array <= 0] <- small.positive
    result.array <- array(0, dim = dim(array))
    for (i in seq_len(n - 1)) {
      for (j in (i + 1):n) {
        # M/A coordinates for the (i, j) pair.
        mva <- data.frame(m = log2(array[, i] / array[, j]),
                          a = 0.5 * log2(array[, i] * array[, j]))
        # loess rather than lowess so we can use predict(); approximate
        # settings for speed, with 2 robustness iterations.
        temp.loess <- loess(m ~ a, mva,
                            control = loess.control(surface = "interpolate",
                                                    statistics = "approximate",
                                                    trace.hat = "approximate",
                                                    iterations = 2))
        # \hat{M}: the intensity-dependent bias to subtract from M.
        m.hat <- predict(temp.loess, mva$a)
        # Back-transform the corrected (M - \hat{M}, A) pair to
        # intensities, weighting by 1/(n-1) so each column ends up as the
        # mean of its n-1 pairwise corrections.
        correction <- 0.5 * (mva$m - m.hat)
        result.array[, i] <- result.array[, i] +
          2^(mva$a + correction) / (n - 1)
        result.array[, j] <- result.array[, j] +
          2^(mva$a - correction) / (n - 1)
      }
    }
    # Carry the dimnames through to the corrected matrix.
    rownames(result.array) <- rownames(array)
    colnames(result.array) <- colnames(array)
    array <- result.array
  }
  return(array)
}