hi,
i’m trying to train a really really simple linear regression with gluon on mxnet but the traceback is as follows:
Traceback (most recent call last):
File "a.py", line 37, in <module>
output = model(x)
File "C:\python36\lib\site-packages\mxnet\gluon\block.py", line 413, in __call__
return self.forward(*args)
File "C:\python36\lib\site-packages\mxnet\gluon\block.py", line 629, in forward
return self.hybrid_forward(ndarray, x, *args, **params)
File "C:\python36\lib\site-packages\mxnet\gluon\nn\basic_layers.py", line 207, in hybrid_forward
flatten=self._flatten, name='fwd')
File "<string>", line 78, in FullyConnected
File "C:\python36\lib\site-packages\mxnet\_ctypes\ndarray.py", line 92, in _imperative_invoke
ctypes.byref(out_stypes)))
File "C:\python36\lib\site-packages\mxnet\base.py", line 149, in check_call
raise MXNetError(py_str(_LIB.MXGetLastError()))
mxnet.base.MXNetError: [08:59:00] c:\projects\mxnet-distro-win\mxnet-build\src\io\../operator/elemwise_op_common.h:123: Check failed: assign(&dattr, (*vec)[i]) Incompatible attr in node at 1-th input: expected int64, got float32
this is my code to load and train:
from sklearn.model_selection import *
import mxnet as mx
from mxnet import gluon, nd, autograd, metric
import numpy as np
import csv
x, y=[], []
with open("a.csv", newline="\n") as f:
r=csv.reader(f)
for row in r:
x.append(row[0])
y.append(row[1])
x=np.array(x, dtype = np.int64).reshape(-1, 1).astype(np.int64)
y=np.array(y, dtype = np.int64).astype(np.int64)
x_train, x_test, y_train, y_test=train_test_split(x, y, test_size=0.20, shuffle=False)
# make our regression model (a Dense layour)
model = gluon.nn.Dense(1)
# initialize and fit
batch_size = 10
epochs = 100
train_dataset = mx.gluon.data.ArrayDataset(x_train, y_train)
train_iter = mx.gluon.data.DataLoader(train_dataset, last_batch = "rollover", batch_size = batch_size)
# initialize
model.collect_params().initialize(mx.initializer.Normal(), ctx=mx.cpu())
trainer = mx.gluon.Trainer(model.collect_params(), "sgd", {"learning_rate":0.0001 } )
# train
for e in range(epochs):
for i, (x, y) in enumerate(train_iter):
x.attach_grad()
y.attach_grad()
with autograd.record():
output = model(x)
loss = mx.gluon.loss.L2Loss(output, y)
loss.backward()
trainer.step(x.shape[0])
and, here is how my a.csv looks like:
0,0
1,4
2,8
3,12
4,16
5,20
6,24
7,28
8,32
9,36
thanks.