Commit 432e33c

refactor/layer: remove redundant .enumerate() calls [SKIP_CHANGELOG]

Parent: 6f41247
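
The change is mechanical: every touched line iterated with .enumerate() but bound the index to `_` and discarded it, so a plain .iter().map(...) builds the identical vector. A minimal, self-contained sketch of the before/after shape (the `vals` vector here is illustrative, not from this commit):

    fn main() {
        let vals = vec![Box::new(1.0f32), Box::new(2.0)];

        // Before: .enumerate() yields (index, item), but the index is
        // bound to `_` and dropped on the spot.
        let before: Vec<&f32> = vals.iter().enumerate().map(|(_, val)| &**val).collect();

        // After: same output, one iterator adapter fewer.
        let after: Vec<&f32> = vals.iter().map(|val| &**val).collect();

        assert_eq!(before, after);
    }

Dropping the adapter removes a useless tuple per element and makes each closure signature match what the body actually consumes.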

File tree: 1 file changed (+12, -12)
src/layer.rs (+12, -12)

@@ -756,15 +756,15 @@ pub trait ILayer<B: IBackend> : ComputeOutput<f32, B> + ComputeInputGradient<f32
                       output_data: &mut [ArcLock<SharedTensor<f32>>]) {
         // acquire all the locks
         let inp: Vec<_> = input_data.iter().map(|b| b.read().unwrap()).collect();
-        let input_data_: Vec<&SharedTensor<f32>> = inp.iter().enumerate().map(|(_, val)| &**val).collect();
+        let input_data_: Vec<&SharedTensor<f32>> = inp.iter().map(|val| &**val).collect();
 
         let wgts: Vec<_> = weights_data.iter().map(|w| w.read().unwrap()).collect();
-        let weights_data_: Vec<&SharedTensor<f32>> = wgts.iter().enumerate().map(|(_, val)| &**val).collect();
+        let weights_data_: Vec<&SharedTensor<f32>> = wgts.iter().map(|val| &**val).collect();
 
         let out_ref = output_data.iter().cloned().collect::<Vec<_>>();
         let mut out = &mut out_ref.iter().map(|b| b.write().unwrap()).collect::<Vec<_>>();
         let mut output_w = &mut out.iter_mut().map(|a| a).collect::<Vec<_>>();
-        let mut output_data_: Vec<&mut SharedTensor<f32>> = output_w.iter_mut().enumerate().map(|(_, val)| &mut ***val).collect();
+        let mut output_data_: Vec<&mut SharedTensor<f32>> = output_w.iter_mut().map(|val| &mut ***val).collect();
 
         self.compute_output(backend, &weights_data_, &input_data_, &mut output_data_);
     }
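
The two-step collect that survives the refactor is deliberate: the RwLock read guards must be kept alive in their own Vec so the plain &SharedTensor references handed to compute_output stay valid. A standalone sketch of that lifetime pattern, substituting Arc<RwLock<Vec<f32>>> for ArcLock<SharedTensor<f32>> (the types and the sum_all helper are illustrative, not the crate's API):

    use std::sync::{Arc, RwLock};

    // Hypothetical stand-in for a function taking read-only tensor refs.
    fn sum_all(tensors: &[&Vec<f32>]) -> f32 {
        tensors.iter().flat_map(|t| t.iter()).sum()
    }

    fn main() {
        let input_data: Vec<Arc<RwLock<Vec<f32>>>> = vec![
            Arc::new(RwLock::new(vec![1.0, 2.0])),
            Arc::new(RwLock::new(vec![3.0])),
        ];

        // Step 1: acquire and hold the read guards; they must outlive
        // the borrows taken below.
        let inp: Vec<_> = input_data.iter().map(|b| b.read().unwrap()).collect();
        // Step 2: borrow through the guards; an index from .enumerate()
        // would add nothing here.
        let input_data_: Vec<&Vec<f32>> = inp.iter().map(|val| &**val).collect();

        assert_eq!(sum_all(&input_data_), 6.0);
    }
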
@@ -785,17 +785,17 @@ pub trait ILayer<B: IBackend> : ComputeOutput<f32, B> + ComputeInputGradient<f32
                                input_data: &[ArcLock<SharedTensor<f32>>],
                                input_gradients: &mut [ArcLock<SharedTensor<f32>>]) {
         let wgts_data: Vec<_> = weights_data.iter().map(|b| b.read().unwrap()).collect();
-        let weights_data_: Vec<&SharedTensor<f32>> = wgts_data.iter().enumerate().map(|(_, val)| &**val).collect();
+        let weights_data_: Vec<&SharedTensor<f32>> = wgts_data.iter().map(|val| &**val).collect();
         let out_data: Vec<_> = output_data.iter().map(|b| b.read().unwrap()).collect();
-        let output_data_: Vec<&SharedTensor<f32>> = out_data.iter().enumerate().map(|(_, val)| &**val).collect();
+        let output_data_: Vec<&SharedTensor<f32>> = out_data.iter().map(|val| &**val).collect();
         let out_gradient: Vec<_> = output_gradients.iter().map(|b| b.read().unwrap()).collect();
-        let output_gradients_: Vec<&SharedTensor<f32>> = out_gradient.iter().enumerate().map(|(_, val)| &**val).collect();
+        let output_gradients_: Vec<&SharedTensor<f32>> = out_gradient.iter().map(|val| &**val).collect();
         let inp_data: Vec<_> = input_data.iter().map(|b| b.read().unwrap()).collect();
-        let input_data_: Vec<&SharedTensor<f32>> = inp_data.iter().enumerate().map(|(_, val)| &**val).collect();
+        let input_data_: Vec<&SharedTensor<f32>> = inp_data.iter().map(|val| &**val).collect();
         let btm_gradient_ref = input_gradients.iter().cloned().collect::<Vec<_>>();
         let mut btm_gradient = &mut btm_gradient_ref.iter().map(|b| b.write().unwrap()).collect::<Vec<_>>();
         let mut input_gradient = &mut btm_gradient.iter_mut().map(|a| a).collect::<Vec<_>>();
-        let mut input_gradients_: Vec<&mut SharedTensor<f32>> = input_gradient.iter_mut().enumerate().map(|(_, val)| &mut ***val).collect();
+        let mut input_gradients_: Vec<&mut SharedTensor<f32>> = input_gradient.iter_mut().map(|val| &mut ***val).collect();
 
         self.compute_input_gradient(backend, &weights_data_, &output_data_, &output_gradients_, &input_data_, &mut input_gradients_);
     }
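
The mutable side keeps one more level of indirection: write guards are collected, then &mut-borrowed into an intermediate Vec, so each closure argument is &mut &mut RwLockWriteGuard<_> and unwrapping it takes &mut ***val. A reduced sketch of that chain (illustrative types, not the crate's):

    use std::sync::{Arc, RwLock};

    fn main() {
        let grads: Vec<Arc<RwLock<Vec<f32>>>> =
            vec![Arc::new(RwLock::new(vec![0.0, 0.0]))];

        // Hold the write guards for as long as the &mut borrows are needed.
        let mut out = grads.iter().map(|b| b.write().unwrap()).collect::<Vec<_>>();
        // Mirror the diff's intermediate Vec of &mut guard.
        let mut output_w = out.iter_mut().collect::<Vec<_>>();
        // Each element is &mut &mut RwLockWriteGuard<_>, hence three derefs.
        let output_data_: Vec<&mut Vec<f32>> =
            output_w.iter_mut().map(|val| &mut ***val).collect();

        for t in output_data_ {
            t.fill(1.0);
        }

        // Release the write guards before reading back.
        drop(output_w);
        drop(out);
        assert_eq!(*grads[0].read().unwrap(), vec![1.0, 1.0]);
    }
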
@@ -815,15 +815,15 @@ pub trait ILayer<B: IBackend> : ComputeOutput<f32, B> + ComputeInputGradient<f32
                                     input_data: &[ArcLock<SharedTensor<f32>>],
                                     weights_gradients: &mut [ArcLock<SharedTensor<f32>>]) {
         let out_data: Vec<_> = output_data.iter().map(|b| b.read().unwrap()).collect();
-        let output_data_: Vec<&SharedTensor<f32>> = out_data.iter().enumerate().map(|(_, val)| &**val).collect();
+        let output_data_: Vec<&SharedTensor<f32>> = out_data.iter().map(|val| &**val).collect();
         let out_gradients: Vec<_> = output_gradients.iter().map(|b| b.read().unwrap()).collect();
-        let output_gradients_: Vec<&SharedTensor<f32>> = out_gradients.iter().enumerate().map(|(_, val)| &**val).collect();
+        let output_gradients_: Vec<&SharedTensor<f32>> = out_gradients.iter().map(|val| &**val).collect();
         let inp_data: Vec<_> = input_data.iter().map(|b| b.read().unwrap()).collect();
-        let input_data_: Vec<&SharedTensor<f32>> = inp_data.iter().enumerate().map(|(_, val)| &**val).collect();
+        let input_data_: Vec<&SharedTensor<f32>> = inp_data.iter().map(|val| &**val).collect();
         let wgt_gradient_ref = weights_gradients.iter().cloned().collect::<Vec<_>>();
         let mut wgt_gradient = &mut wgt_gradient_ref.iter().map(|b| b.write().unwrap()).collect::<Vec<_>>();
         let mut weights_gradient = &mut wgt_gradient.iter_mut().map(|a| a).collect::<Vec<_>>();
-        let mut weights_gradients_: Vec<&mut SharedTensor<f32>> = weights_gradient.iter_mut().enumerate().map(|(_, val)| &mut ***val).collect();
+        let mut weights_gradients_: Vec<&mut SharedTensor<f32>> = weights_gradient.iter_mut().map(|val| &mut ***val).collect();
 
         self.compute_parameters_gradient(backend, &output_data_, &output_gradients_, &input_data_, &mut weights_gradients_);
     }
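
All twelve call sites share the same shape, which makes this the kind of cleanup a lint pass finds in one go. As a sketch: recent Clippy versions ship an unused_enumerate_index lint for exactly this pattern (assumption: your toolchain is new enough to have it; rustc compiles the attribute either way), so cargo clippy would report each site:

    // A minimal sketch; assumes a Clippy recent enough to know
    // clippy::unused_enumerate_index.
    #![warn(clippy::unused_enumerate_index)]

    fn main() {
        let xs = vec![10, 20, 30];
        // Clippy flags this closure: the index produced by .enumerate()
        // is bound to `_` and immediately discarded.
        let doubled: Vec<i32> = xs.iter().enumerate().map(|(_, v)| v * 2).collect();
        assert_eq!(doubled, vec![20, 40, 60]);
    }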
