Today I tried using CUDA in my ND4J and Deeplearning4j project. After that, the neural net (imported from Keras) started working faster, but the code below became slow.
I have already tried switching the ND4J backend back to the native (CPU) one, and with it I get fast results.
The problem part is highlighted with comments (2 lines).
import com.rabbitmq.client.Channel;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.ops.transforms.Transforms;

import java.io.IOException;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;

public class GraphUpdater implements Runnable {

    private Pair pubPair;
    private ConcurrentHashMap<Integer, INDArray> pubsList;
    private Connection connectionMain;
    private Connection connectionSite;
    private Channel channel;

    GraphUpdater(Pair pubPair, ConcurrentHashMap<Integer, INDArray> pubsList, Channel channel) throws SQLException {
        this.pubPair = pubPair;
        this.channel = channel;
        this.pubsList = pubsList;
        connectionMain = DataBaseConnectionsPool.getConnection();
        connectionSite = DataBaseConnectionsPool.getConnectionSite();
    }

    @Override
    public void run() {
        try {
            channel.basicAck(pubPair.deliveryTag, false);
        } catch (IOException e) {
            System.out.println("Error, pub=" + pubPair.pub);
            e.printStackTrace();
        }
        PreparedStatement st;
        PreparedStatement stNew;
        try {
            st = connectionMain.prepareStatement("update vec_graph set closed_pubs=closed_pubs || ? where pub=?");
            stNew = connectionMain.prepareStatement("insert into vec_graph values (?, ?)");
            Statement psNew = connectionMain.createStatement();
            ResultSet rs = psNew.executeQuery("select * from new_public_vectors where pub=" + pubPair.pub);
            float[] _floatArr = new float[64];
            while (rs.next()) {
                Array arr = rs.getArray("vector");
                Object[] obj = (Object[]) arr.getArray();
                for (int vIndex = 0; vIndex < 64; vIndex++) {
                    _floatArr[vIndex] = (float) (double) obj[vIndex];
                }
                pubsList.put(rs.getInt(1), Nd4j.create(_floatArr));
            }
            // pub from task X all pubs from db
            int pub = pubPair.pub;
            List<Integer> closed = new ArrayList<>();
            double mean = 0.96D;
            INDArray currentVector = pubsList.get(pub);
            //!%!%!%!% slow part of the code
            for (int pubId : pubsList.keySet()) {
                INDArray publicVector = pubsList.get(pubId);
                if (currentVector == null || pub == pubId || publicVector == null) {
                    continue;
                }
                //!%!%!%!% mega slow part of the code, ~99% of CPU time in VisualVM
                double dist = -Transforms.cosineDistance(currentVector, publicVector) + 1; // Transfer from cosine sim to cosine dist
                if ((dist - mean) < 0.01 && (dist - mean) > 0) {
                    mean = (mean + dist) / 2;
                } else if (dist > mean) {
                    mean = dist;
                    closed.clear();
                    st.clearBatch();
                } else {
                    continue;
                }
                Array a = connectionMain.createArrayOf("int", new Object[]{pub});
                st.setArray(1, a);
                st.setInt(2, pubId);
                st.addBatch();
                closed.add(pubId);
            }
            Object[] obj_vector = new Object[closed.size()];
            for (int i = 0; i < closed.size(); i++) {
                obj_vector[i] = closed.get(i);
            }
            Array closedArray = connectionMain.createArrayOf("int", obj_vector);
            stNew.setInt(1, pub);
            stNew.setArray(2, closedArray);
            stNew.addBatch();
            if (pubPair.byUser != 0) {
                showToUser(closed, pub, pubPair.byUser);
            }
            try {
                st.executeBatch();
                stNew.executeBatch();
            } catch (BatchUpdateException e) {
                e.printStackTrace();
                e.getNextException().printStackTrace();
            }
        } catch (BatchUpdateException e) {
            e.printStackTrace();
            e.getNextException().printStackTrace();
        } catch (SQLException e) {
            e.printStackTrace();
        } finally {
            try {
                connectionMain.close();
                connectionSite.close();
            } catch (SQLException e) {
                e.printStackTrace();
            }
        }
    }
}
I would like one of the following:
Get faster results while still using the GPU
Turn off the GPU for this part of the code and keep it only for the NN
Best answer
OK, I will rewrite the cosineDistance part of the code with my own implementation.
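A minimal sketch of what such a replacement could look like, assuming the vectors are kept (or cached) as plain float[] arrays so that no GPU kernel launch or host-to-device copy happens for every 64-element pair. The class and method names below are illustrative, not part of the original code:

// Hypothetical plain-CPU helper: cosine similarity computed directly on float[] arrays,
// so the CUDA backend is only exercised by the neural net.
public final class VectorMath {

    private VectorMath() {}

    // Returns the cosine similarity of two equal-length vectors, the same quantity
    // the question's code derives as -cosineDistance + 1 (assuming ND4J's
    // cosineDistance is 1 - cosine similarity).
    public static double cosineSimilarity(float[] a, float[] b) {
        double dot = 0.0, normA = 0.0, normB = 0.0;
        for (int i = 0; i < a.length; i++) {
            dot   += a[i] * b[i];
            normA += a[i] * a[i];
            normB += b[i] * b[i];
        }
        if (normA == 0.0 || normB == 0.0) {
            return 0.0; // convention for degenerate zero vectors
        }
        return dot / (Math.sqrt(normA) * Math.sqrt(normB));
    }
}

Inside the loop, the line
double dist = -Transforms.cosineDistance(currentVector, publicVector) + 1;
would then become something like
double dist = VectorMath.cosineSimilarity(currentArr, publicArr);
where currentArr and publicArr are cached float[] values (for example copies of _floatArr stored alongside the INDArrays, or pulled out once per vector, e.g. via INDArray.toFloatVector(), depending on the ND4J version). This keeps the neural net on the CUDA backend while the pairwise comparisons stay on the CPU, which tends to suit many tiny 64-element operations better than launching a GPU op per pair.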