Cleanup: Change 1 used in bitshifts to 1ULL
Coverity complains about this. It is not a bug as long as we never shift by more than 31, but it is not terrible to change the constants from 1 to 1ULL as cleanup.

Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Richard Yao <richard.yao@alumni.stonybrook.edu>
Closes #13914
This commit is contained in:
parent
c629f0bf62
commit
e506a0ce40
|
@ -6203,10 +6203,10 @@ zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
|
|||
*/
|
||||
for (uint64_t inner_offset = 0;
|
||||
inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
|
||||
inner_offset += 1 << vd->vdev_ashift) {
|
||||
inner_offset += 1ULL << vd->vdev_ashift) {
|
||||
if (range_tree_contains(msp->ms_allocatable,
|
||||
offset + inner_offset, 1 << vd->vdev_ashift)) {
|
||||
obsolete_bytes += 1 << vd->vdev_ashift;
|
||||
offset + inner_offset, 1ULL << vd->vdev_ashift)) {
|
||||
obsolete_bytes += 1ULL << vd->vdev_ashift;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -3000,7 +3000,7 @@ scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
|
|||
* otherwise we leave shorter remnant every txg.
|
||||
*/
|
||||
uint64_t start;
|
||||
uint64_t size = 1 << rt->rt_shift;
|
||||
uint64_t size = 1ULL << rt->rt_shift;
|
||||
range_seg_t *addr_rs;
|
||||
if (queue->q_last_ext_addr != -1) {
|
||||
start = queue->q_last_ext_addr;
|
||||
|
|
|
@ -1449,7 +1449,7 @@ metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
|
|||
zfs_btree_t *size_tree = mrap->mra_bt;
|
||||
|
||||
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
|
||||
(1 << mrap->mra_floor_shift))
|
||||
(1ULL << mrap->mra_floor_shift))
|
||||
return;
|
||||
|
||||
zfs_btree_add(size_tree, rs);
|
||||
|
@ -1461,7 +1461,7 @@ metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
|
|||
metaslab_rt_arg_t *mrap = arg;
|
||||
zfs_btree_t *size_tree = mrap->mra_bt;
|
||||
|
||||
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1 <<
|
||||
if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
|
||||
mrap->mra_floor_shift))
|
||||
return;
|
||||
|
||||
|
@ -3552,7 +3552,7 @@ metaslab_should_condense(metaslab_t *msp)
|
|||
{
|
||||
space_map_t *sm = msp->ms_sm;
|
||||
vdev_t *vd = msp->ms_group->mg_vd;
|
||||
uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
|
||||
uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
|
||||
|
||||
ASSERT(MUTEX_HELD(&msp->ms_lock));
|
||||
ASSERT(msp->ms_loaded);
|
||||
|
|
Loading…
Reference in New Issue