
Commit b1e3879

refactor/cleanup: replace some &vec![_] with &[_], fix redundant clone() [SKIP_CHANGELOG]
1 parent 4a21001 commit b1e3879
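
The pattern behind most of the changes below: when a function only borrows a slice, a borrowed array literal (&[_]) satisfies the same &[usize] parameter as a borrowed vec![_], but without building a temporary Vec on the heap. A minimal sketch of why both call forms compile against one signature; the Config type and add_input method here are simplified stand-ins for illustration, not Leaf's exact API:

// Simplified stand-in for a config type that only borrows the shape it is given.
struct Config {
    shape: Vec<usize>,
}

impl Config {
    // Taking &[usize] lets callers pass either a borrowed Vec or an array literal.
    fn add_input(&mut self, name: &str, shape: &[usize]) {
        println!("registering input {}", name);
        self.shape = shape.to_vec();
    }
}

fn main() {
    let mut cfg = Config { shape: Vec::new() };
    // Heap-allocates a temporary Vec, then borrows it as a slice (&Vec<usize> -> &[usize]).
    cfg.add_input("in", &vec![1, 30, 30]);
    // Borrows a fixed-size array directly; &[usize; 3] coerces to &[usize], no Vec needed.
    cfg.add_input("in", &[1, 30, 30]);
    assert_eq!(cfg.shape, vec![1, 30, 30]);
}

The SharedTensor::new call sites below follow the same reasoning, since their shape argument is likewise passed by reference.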

8 files changed: +30 -30 lines changed

Cargo.toml

+1-1
@@ -14,7 +14,7 @@ keywords = ["deep-learning", "neural-networks", "machine-learning", "framework"]
 license = "MIT OR Apache-2.0"
 
 [dependencies]
-collenchyma = { version = "0.0.8", default-features = false, features = ["native"] } # native feature to read/write data into tensors
+collenchyma = { version = "0.0.9", default-features = false, features = ["native"] } # native feature to read/write data into tensors
 collenchyma-blas = { version = "0.2.0", default-features = false, features = ["native"] } # only compiles with native feature
 collenchyma-nn = { version = "0.3.2", default-features = false }

benches/network_benches.rs

+7-7
@@ -72,8 +72,8 @@ mod cuda {
     fn bench_mnsit_forward_1(b: &mut Bencher) {
         let mut cfg = SequentialConfig::default();
         // set up input
-        cfg.add_input("in", &vec![1, 30, 30]);
-        cfg.add_input("label", &vec![1, 1, 10]);
+        cfg.add_input("in", &[1, 30, 30]);
+        cfg.add_input("label", &[1, 1, 10]);
         // set up sigmoid
         let mut sig_cfg = LayerConfig::new("sig", LayerType::Sigmoid);
         sig_cfg.add_input("in");
@@ -96,7 +96,7 @@ mod cuda {
             backend.clone(), &LayerConfig::new("network", LayerType::Sequential(cfg)));
 
         let _ = timeit_loops!(10, {
-            let inp = SharedTensor::<f32>::new(backend.device(), &vec![1, 30, 30]).unwrap();
+            let inp = SharedTensor::<f32>::new(backend.device(), &[1, 30, 30]).unwrap();
             let inp_lock = Arc::new(RwLock::new(inp));
 
             network.forward(&[inp_lock]);
@@ -120,7 +120,7 @@ mod cuda {
     fn alexnet_forward(b: &mut Bencher) {
         let mut cfg = SequentialConfig::default();
         // Layer: data
-        cfg.add_input("data", &vec![128, 3, 224, 224]);
+        cfg.add_input("data", &[128, 3, 224, 224]);
         // Layer: conv1
         let conv1_layer_cfg = ConvolutionConfig {
             num_output: 64,
@@ -260,7 +260,7 @@ mod cuda {
 
         let func = || {
             let forward_time = timeit_loops!(1, {
-                let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 112, 112]).unwrap();
+                let inp = SharedTensor::<f32>::new(backend.device(), &[128, 3, 112, 112]).unwrap();
 
                 let inp_lock = Arc::new(RwLock::new(inp));
                 network.forward(&[inp_lock]);
@@ -277,7 +277,7 @@ mod cuda {
         // let _ = env_logger::init();
         let mut cfg = SequentialConfig::default();
         // Layer: data
-        cfg.add_input("data", &vec![128, 3, 112, 112]);
+        cfg.add_input("data", &[128, 3, 112, 112]);
         // Layer: conv1
         let conv1_layer_cfg = ConvolutionConfig {
             num_output: 32,
@@ -416,7 +416,7 @@ mod cuda {
             backend.clone(), &LayerConfig::new("network", LayerType::Sequential(cfg)));
 
         let mut func = || {
-            let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 112, 112]).unwrap();
+            let inp = SharedTensor::<f32>::new(backend.device(), &[128, 3, 112, 112]).unwrap();
 
             let inp_lock = Arc::new(RwLock::new(inp));
             network.forward(&[inp_lock]);

examples/benchmarks.rs

+6-6
@@ -120,7 +120,7 @@ fn bench_alexnet() {
 #[cfg(all(feature="cuda", not(feature="native")))]
 fn bench_alexnet() {
     let mut cfg = SequentialConfig::default();
-    cfg.add_input("data", &vec![128, 3, 224, 224]);
+    cfg.add_input("data", &[128, 3, 224, 224]);
 
     let conv1_layer_cfg = ConvolutionConfig { num_output: 64, filter_shape: vec![11], padding: vec![2], stride: vec![4] };
     cfg.add_layer(LayerConfig::new("conv1", conv1_layer_cfg));
@@ -160,7 +160,7 @@ fn bench_alexnet() {
     let func = || {
         let forward_time = timeit_loops!(1, {
             {
-                let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 224, 224]).unwrap();
+                let inp = SharedTensor::<f32>::new(backend.device(), &[128, 3, 224, 224]).unwrap();
 
                 let inp_lock = Arc::new(RwLock::new(inp));
                 network.forward(&[inp_lock.clone()]);
@@ -202,7 +202,7 @@ fn bench_overfeat() {
 #[cfg(all(feature="cuda", not(feature="native")))]
 fn bench_overfeat() {
     let mut cfg = SequentialConfig::default();
-    cfg.add_input("data", &vec![128, 3, 231, 231]);
+    cfg.add_input("data", &[128, 3, 231, 231]);
 
     let conv1_layer_cfg = ConvolutionConfig { num_output: 96, filter_shape: vec![11], padding: vec![0], stride: vec![4] };
     cfg.add_layer(LayerConfig::new("conv1", conv1_layer_cfg));
@@ -242,7 +242,7 @@ fn bench_overfeat() {
     let func = || {
         let forward_time = timeit_loops!(1, {
             {
-                let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 231, 231]).unwrap();
+                let inp = SharedTensor::<f32>::new(backend.device(), &[128, 3, 231, 231]).unwrap();
 
                 let inp_lock = Arc::new(RwLock::new(inp));
                 network.forward(&[inp_lock.clone()]);
@@ -284,7 +284,7 @@ fn bench_vgg_a() {
 #[cfg(all(feature="cuda", not(feature="native")))]
 fn bench_vgg_a() {
     let mut cfg = SequentialConfig::default();
-    cfg.add_input("data", &vec![64, 3, 224, 224]);
+    cfg.add_input("data", &[64, 3, 224, 224]);
 
     let conv1_layer_cfg = ConvolutionConfig { num_output: 64, filter_shape: vec![3], padding: vec![1], stride: vec![1] };
     cfg.add_layer(LayerConfig::new("conv1", conv1_layer_cfg));
@@ -339,7 +339,7 @@ fn bench_vgg_a() {
     let func = || {
        let forward_time = timeit_loops!(1, {
             {
-                let inp = SharedTensor::<f32>::new(backend.device(), &vec![64, 3, 224, 224]).unwrap();
+                let inp = SharedTensor::<f32>::new(backend.device(), &[64, 3, 224, 224]).unwrap();
 
                 let inp_lock = Arc::new(RwLock::new(inp));
                 network.forward(&[inp_lock.clone()]);

src/layer.rs

+7-7
@@ -203,8 +203,8 @@ impl<B: IBackend> Layer<B> {
             }
 
             let backend: Rc<IBackend<F=B::F>> = self.backend.clone();
-            blob_data = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA
-            blob_gradient = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA
+            blob_data = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &[1,1,1]).unwrap())); // [1,1,1] for CUDA
+            blob_gradient = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &[1,1,1]).unwrap())); // [1,1,1] for CUDA
         }
         self.output_blob_names.push(blob_name.clone());
         self.output_blobs_data.push(blob_data.clone());
@@ -227,8 +227,8 @@ impl<B: IBackend> Layer<B> {
         info!("{} -> {}", self.name, blob_name);
 
         let backend: Rc<IBackend<F=B::F>> = self.backend.clone();
-        let output_data = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA
-        let output_gradient = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA
+        let output_data = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &[1,1,1]).unwrap())); // [1,1,1] for CUDA
+        let output_gradient = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &[1,1,1]).unwrap())); // [1,1,1] for CUDA
         self.output_blobs_data.push(output_data);
         self.output_blobs_gradient.push(output_gradient);
     }
@@ -460,7 +460,7 @@ impl<B: IBackend> Layer<B> {
 
         let forward_time = timeit_loops!(1, {
             if self.is_using_in_place() {
-                self.worker.forward(&self.backend, &vec![], &self.weights_data, &mut self.output_blobs_data);
+                self.worker.forward(&self.backend, &[], &self.weights_data, &mut self.output_blobs_data);
             } else {
                 self.worker.forward(&self.backend, &self.input_blobs_data, &self.weights_data, &mut self.output_blobs_data);
             }
@@ -498,8 +498,8 @@ impl<B: IBackend> Layer<B> {
         if self.is_using_in_place() {
             self.worker.backward_input(&self.backend,
                                        &self.weights_data,
-                                       &vec![],
-                                       &vec![],
+                                       &[],
+                                       &[],
                                        &self.input_blobs_data,
                                        &mut self.input_blobs_gradient)
         } else {
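
In the in-place branches above, the empty input list becomes &[] instead of &vec![]. A small sketch of the same idea with a simplified signature (not the worker trait's real one): the element type of the empty slice literal is inferred from the parameter, and no temporary Vec is constructed.

// Simplified stand-in: a forward pass that only borrows its list of inputs.
fn forward(inputs: &[i32]) -> usize {
    inputs.len()
}

fn main() {
    // Builds a temporary empty Vec and borrows it as a slice.
    assert_eq!(forward(&vec![]), 0);
    // Empty slice literal: same type after coercion, nothing to construct or drop.
    assert_eq!(forward(&[]), 0);
}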

src/layers/common/convolution.rs

+3-3
@@ -252,12 +252,12 @@ mod tests {
             stride: vec![4],
         };
         let layer = Convolution::<Backend<Cuda>>::from_config(&cfg);
-        let num_spatial_dims = layer.num_spatial_dims(&vec![1, 3, 224, 224]);
+        let num_spatial_dims = layer.num_spatial_dims(&[1, 3, 224, 224]);
         assert_eq!(2, num_spatial_dims);
         assert_eq!(vec![11, 11], layer.spatial_filter_dims(2));
         assert_eq!(vec![2, 2], layer.padding_dims(2));
         assert_eq!(vec![4, 4], layer.stride_dims(2));
-        assert_eq!(vec![64, 3, 11, 11], layer.calculate_filter_shape(&vec![1, 3, 224, 224]));
-        assert_eq!(vec![1, 64, 55, 55], layer.calculate_output_shape(&vec![1, 3, 224, 224]));
+        assert_eq!(vec![64, 3, 11, 11], layer.calculate_filter_shape(&[1, 3, 224, 224]));
+        assert_eq!(vec![1, 64, 55, 55], layer.calculate_output_shape(&[1, 3, 224, 224]));
     }
 }
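
The asserted shapes in this test line up with the usual convolution output-size formula, output = (input + 2*padding - filter) / stride + 1 with floor division. A quick check of the 224 -> 55 expectation; the helper below is only for illustration, not Leaf's implementation:

// Standard convolution output-size formula (integer division = floor),
// used only to double-check the shapes asserted in the test above.
fn conv_out_dim(input: usize, filter: usize, padding: usize, stride: usize) -> usize {
    (input + 2 * padding - filter) / stride + 1
}

fn main() {
    // Input 224x224, 11x11 filter, padding 2, stride 4:
    // (224 + 4 - 11) / 4 + 1 = 217 / 4 + 1 = 54 + 1 = 55,
    // which matches the asserted output shape [1, 64, 55, 55].
    assert_eq!(conv_out_dim(224, 11, 2, 4), 55);
}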

src/layers/common/sequential.rs

+1-1
@@ -43,7 +43,7 @@ impl<B: IBackend + LayerOps<f32> + 'static> Sequential<B> {
     pub fn from_config(backend: Rc<B>, config: &SequentialConfig) -> Sequential<B> {
         let mut layer = Self::empty();
 
-        layer.init_layers(backend, &config.clone());
+        layer.init_layers(backend, config);
 
         layer
     }
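
This is the "redundant clone()" part of the commit message: from_config already holds a &SequentialConfig, and init_layers only needs a borrow, so cloning the whole config just to take a reference to the temporary copy is wasted work. A minimal sketch of the pattern with simplified stand-in types (not Leaf's real ones):

#[derive(Clone)]
struct SequentialConfig {
    layers: Vec<String>,
}

// The callee only reads through the borrow; it never needs ownership.
fn init_layers(config: &SequentialConfig) -> usize {
    config.layers.len()
}

fn from_config(config: &SequentialConfig) -> usize {
    // Before: deep-copies the config just to borrow the temporary copy.
    let _before = init_layers(&config.clone());
    // After: the reference we already have is passed straight through.
    init_layers(config)
}

fn main() {
    let cfg = SequentialConfig { layers: vec!["linear1".into(), "sigmoid".into()] };
    assert_eq!(from_config(&cfg), 2);
}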

src/util.rs

+1-1
@@ -59,7 +59,7 @@ pub fn write_batch_sample<T: NumCast + ::std::marker::Copy>(tensor: &mut SharedT
 /// Create a Collenchyma SharedTensor for a scalar value.
 pub fn native_scalar<T: NumCast + ::std::marker::Copy>(scalar: T) -> SharedTensor<T> {
     let native = native_backend();
-    let mut shared_scalar = SharedTensor::<T>::new(native.device(), &vec![1]).unwrap();
+    let mut shared_scalar = SharedTensor::<T>::new(native.device(), &1).unwrap();
     write_to_memory(shared_scalar.get_mut(native.device()).unwrap(), &[scalar]);
 
     shared_scalar
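
This call site passes &1 where &vec![1] was used before, which suggests SharedTensor::new is generic over a shape-descriptor conversion rather than taking a concrete slice, so a single integer can describe a one-element tensor. A sketch of that pattern under that assumption; the IntoShape trait and describe function below are hypothetical, not collenchyma's actual API:

// Hypothetical trait: anything that can describe a tensor shape.
trait IntoShape {
    fn shape(&self) -> Vec<usize>;
}

// A bare usize describes a one-dimensional shape with that many elements.
impl IntoShape for usize {
    fn shape(&self) -> Vec<usize> {
        vec![*self]
    }
}

// An explicit Vec of dimensions also works.
impl IntoShape for Vec<usize> {
    fn shape(&self) -> Vec<usize> {
        self.clone()
    }
}

// Stand-in for a constructor that is generic over the descriptor it is given.
fn describe<D: IntoShape>(desc: &D) -> Vec<usize> {
    desc.shape()
}

fn main() {
    // Both forms describe the same single-element tensor; &1 skips the Vec.
    assert_eq!(describe(&vec![1usize]), vec![1]);
    assert_eq!(describe(&1usize), vec![1]);
}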

tests/layer_specs.rs

+4-4
@@ -60,7 +60,7 @@ mod layer_spec {
     #[test]
     fn can_create_single_layer_sequential_layer() {
         let mut model = SequentialConfig::default();
-        model.add_input("data", &vec![28, 28]);
+        model.add_input("data", &[28, 28]);
         model.add_layer(LayerConfig::new("sigmoid", LayerType::Sigmoid));
 
         Layer::from_config(cuda_backend(), &LayerConfig::new("model", LayerType::Sequential(model)));
@@ -69,7 +69,7 @@ mod layer_spec {
     #[test]
     fn can_create_simple_network_sequential_layer() {
         let mut model = SequentialConfig::default();
-        model.add_input("data", &vec![1, 784]);
+        model.add_input("data", &[1, 784]);
         model.add_layer(LayerConfig::new("linear1", LinearConfig { output_size: 1568 }));
         model.add_layer(LayerConfig::new("sigmoid", LayerType::Sigmoid));
         model.add_layer(LayerConfig::new("linear2", LinearConfig { output_size: 10 }));
@@ -83,12 +83,12 @@ mod layer_spec {
         let cuda_backend = cuda_backend();
 
         let mut normal_model = SequentialConfig::default();
-        normal_model.add_input("data", &vec![3]);
+        normal_model.add_input("data", &[3]);
         normal_model.add_layer(LayerConfig::new("sigmoid", LayerType::Sigmoid));
         let mut normal_network = Layer::from_config(cuda_backend.clone(), &LayerConfig::new("normal_model", LayerType::Sequential(normal_model)));
 
         let mut reshape_model = SequentialConfig::default();
-        reshape_model.add_input("data", &vec![3]);
+        reshape_model.add_input("data", &[3]);
         reshape_model.add_layer(LayerConfig::new("reshape", ReshapeConfig { shape: vec![1, 1, 3] }));
         reshape_model.add_layer(LayerConfig::new("sigmoid", LayerType::Sigmoid));
         let mut reshape_network = Layer::from_config(cuda_backend.clone(), &LayerConfig::new("reshape_model", LayerType::Sequential(reshape_model)));
