
Merge branch 'perf-tools' into perf-tools-next

To get the perf test and JSON metrics fixes into the development branch.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Namhyung Kim 2024-02-12 12:19:21 -08:00
commit 39d14c0dd6
210 changed files with 2055 additions and 1016 deletions

View File

@ -145,7 +145,9 @@ filesystem, an overlay filesystem needs to record in the upper filesystem
that files have been removed. This is done using whiteouts and opaque
directories (non-directories are always opaque).
A whiteout is created as a character device with 0/0 device number.
A whiteout is created as a character device with 0/0 device number or
as a zero-size regular file with the xattr "trusted.overlay.whiteout".
When a whiteout is found in the upper level of a merged directory, any
matching name in the lower level is ignored, and the whiteout itself
is also hidden.
@ -154,6 +156,13 @@ A directory is made opaque by setting the xattr "trusted.overlay.opaque"
to "y". Where the upper filesystem contains an opaque directory, any
directory in the lower filesystem with the same name is ignored.
An opaque directory should not contain any whiteouts, because they do not
serve any purpose. A merge directory containing regular files with the xattr
"trusted.overlay.whiteout" should be additionally marked by setting the xattr
"trusted.overlay.opaque" to "x" on the merge directory itself.
This is needed to avoid the overhead of checking the "trusted.overlay.whiteout"
on all entries during readdir in the common case.
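
The upper-layer markings described above can be created from ordinary
userspace code. A minimal sketch, not part of this patch: the paths are
illustrative, the whiteout xattr value is assumed empty (overlayfs checks
only for the xattr's presence), and both mknod() and the "trusted.*"
namespace require CAP_SYS_ADMIN.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/xattr.h>
#include <unistd.h>

static void die(const char *msg) { perror(msg); exit(1); }

int main(void)
{
	int fd;

	/* Classic whiteout: a character device with device number 0/0. */
	if (mknod("upper/dir/removed-a", S_IFCHR | 0000, makedev(0, 0)))
		die("mknod");

	/* Alternative whiteout: zero-size regular file carrying the xattr. */
	fd = open("upper/dir/removed-b", O_CREAT | O_WRONLY, 0000);
	if (fd < 0)
		die("open");
	close(fd);
	if (setxattr("upper/dir/removed-b", "trusted.overlay.whiteout", "", 0, 0))
		die("setxattr whiteout");

	/* Mark the merge directory with "x" so readdir knows it must check
	 * per-entry whiteout xattrs at all. */
	if (setxattr("upper/dir", "trusted.overlay.opaque", "x", 1, 0))
		die("setxattr opaque");
	return 0;
}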
readdir
-------
@ -534,8 +543,9 @@ A lower dir with a regular whiteout will always be handled by the overlayfs
mount, so to support storing an effective whiteout file in an overlayfs mount an
alternative form of whiteout is supported. This form is a regular, zero-size
file with the "overlay.whiteout" xattr set, inside a directory with the
"overlay.whiteouts" xattr set. Such whiteouts are never created by overlayfs,
but can be used by userspace tools (like containers) that generate lower layers.
"overlay.opaque" xattr set to "x" (see `whiteouts and opaque directories`_).
These alternative whiteouts are never created by overlayfs, but can be used by
userspace tools (like containers) that generate lower layers.
These alternative whiteouts can be escaped using the standard xattr escape
mechanism in order to properly nest to any depth.
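For tools that generate or inspect such layers, the entry classification
implied above might look like the following sketch (an illustration, not
code from this patch; it assumes the "trusted." xattr namespace as seen on
the layer's own filesystem):

#include <stdbool.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/xattr.h>

/* A directory only needs per-entry whiteout checks when it is marked "x". */
static bool dir_has_xwhiteouts(const char *dir)
{
	char v[2];
	ssize_t n = getxattr(dir, "trusted.overlay.opaque", v, sizeof(v));

	return n == 1 && v[0] == 'x';
}

static bool is_whiteout(const char *dir, const char *path)
{
	struct stat st;

	if (lstat(path, &st))
		return false;
	/* Classic form: 0/0 character device, always a whiteout. */
	if (S_ISCHR(st.st_mode) && st.st_rdev == makedev(0, 0))
		return true;
	/* Alternative form: zero-size regular file with the whiteout xattr,
	 * only meaningful inside an "x"-marked directory. */
	if (S_ISREG(st.st_mode) && st.st_size == 0 && dir_has_xwhiteouts(dir))
		return getxattr(path, "trusted.overlay.whiteout", NULL, 0) >= 0;
	return false;
}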

View File

@ -4547,7 +4547,7 @@ F: drivers/net/ieee802154/ca8210.c
CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
M: David Howells <dhowells@redhat.com>
L: linux-cachefs@redhat.com (moderated for non-subscribers)
L: netfs@lists.linux.dev
S: Supported
F: Documentation/filesystems/caching/cachefiles.rst
F: fs/cachefiles/
@ -7955,12 +7955,13 @@ L: rust-for-linux@vger.kernel.org
S: Maintained
F: rust/kernel/net/phy.rs
EXEC & BINFMT API
EXEC & BINFMT API, ELF
R: Eric Biederman <ebiederm@xmission.com>
R: Kees Cook <keescook@chromium.org>
L: linux-mm@kvack.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
F: Documentation/userspace-api/ELF.rst
F: fs/*binfmt_*.c
F: fs/exec.c
F: include/linux/binfmts.h
@ -8223,7 +8224,8 @@ F: include/linux/iomap.h
FILESYSTEMS [NETFS LIBRARY]
M: David Howells <dhowells@redhat.com>
L: linux-cachefs@redhat.com (moderated for non-subscribers)
R: Jeff Layton <jlayton@kernel.org>
L: netfs@lists.linux.dev
L: linux-fsdevel@vger.kernel.org
S: Supported
F: Documentation/filesystems/caching/

View File

@ -986,6 +986,10 @@ NOSTDINC_FLAGS += -nostdinc
# perform bounds checking.
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
# Currently, disable -Wstringop-overflow for GCC 11, globally.
KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += -fno-strict-overflow

View File

@ -795,6 +795,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
void *orig_call = func_addr;
bool save_ret;
u32 insn;
@ -878,7 +879,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
stack_size = round_up(stack_size, 16);
if (func_addr) {
if (!is_struct_ops) {
/* For the trampoline called from function entry,
* the frame of traced function and the frame of
* trampoline need to be considered.
@ -998,7 +999,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
if (func_addr) {
if (!is_struct_ops) {
/* trampoline called from function entry */
emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx);
emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);

View File

@ -3452,14 +3452,15 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
static void rbd_lock_del_request(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
bool need_wakeup;
bool need_wakeup = false;
lockdep_assert_held(&rbd_dev->lock_rwsem);
spin_lock(&rbd_dev->lock_lists_lock);
rbd_assert(!list_empty(&img_req->lock_item));
list_del_init(&img_req->lock_item);
need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
list_empty(&rbd_dev->running_list));
if (!list_empty(&img_req->lock_item)) {
list_del_init(&img_req->lock_item);
need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
list_empty(&rbd_dev->running_list));
}
spin_unlock(&rbd_dev->lock_lists_lock);
if (need_wakeup)
complete(&rbd_dev->releasing_wait);
@ -3842,14 +3843,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
return;
}
list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
while (!list_empty(&rbd_dev->acquiring_list)) {
img_req = list_first_entry(&rbd_dev->acquiring_list,
struct rbd_img_request, lock_item);
mutex_lock(&img_req->state_mutex);
rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
if (!result)
list_move_tail(&img_req->lock_item,
&rbd_dev->running_list);
else
list_del_init(&img_req->lock_item);
rbd_img_schedule(img_req, result);
mutex_unlock(&img_req->state_mutex);
}
list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}
static bool locker_equal(const struct ceph_locker *lhs,
@ -5326,7 +5332,7 @@ static void rbd_dev_release(struct device *dev)
if (need_put) {
destroy_workqueue(rbd_dev->task_wq);
ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
}
rbd_dev_free(rbd_dev);
@ -5402,9 +5408,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return NULL;
/* get an id and fill in device name */
rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
minor_to_rbd_dev_id(1 << MINORBITS),
GFP_KERNEL);
rbd_dev->dev_id = ida_alloc_max(&rbd_dev_id_ida,
minor_to_rbd_dev_id(1 << MINORBITS) - 1,
GFP_KERNEL);
if (rbd_dev->dev_id < 0)
goto fail_rbd_dev;
@ -5425,7 +5431,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return rbd_dev;
fail_dev_id:
ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
rbd_dev_free(rbd_dev);
return NULL;

View File

@ -29,8 +29,6 @@ static u32 dpll_pin_xa_id;
WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_DPLL_NOT_REGISTERED(d) \
WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_PIN_REGISTERED(p) \
WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
struct dpll_device_registration {
struct list_head list;
@ -425,6 +423,53 @@ void dpll_device_unregister(struct dpll_device *dpll,
}
EXPORT_SYMBOL_GPL(dpll_device_unregister);
static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
{
kfree(prop->package_label);
kfree(prop->panel_label);
kfree(prop->board_label);
kfree(prop->freq_supported);
}
static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
struct dpll_pin_properties *dst)
{
memcpy(dst, src, sizeof(*dst));
if (src->freq_supported && src->freq_supported_num) {
size_t freq_size = src->freq_supported_num *
sizeof(*src->freq_supported);
dst->freq_supported = kmemdup(src->freq_supported,
freq_size, GFP_KERNEL);
if (!dst->freq_supported)
return -ENOMEM;
}
if (src->board_label) {
dst->board_label = kstrdup(src->board_label, GFP_KERNEL);
if (!dst->board_label)
goto err_board_label;
}
if (src->panel_label) {
dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL);
if (!dst->panel_label)
goto err_panel_label;
}
if (src->package_label) {
dst->package_label = kstrdup(src->package_label, GFP_KERNEL);
if (!dst->package_label)
goto err_package_label;
}
return 0;
err_package_label:
kfree(dst->panel_label);
err_panel_label:
kfree(dst->board_label);
err_board_label:
kfree(dst->freq_supported);
return -ENOMEM;
}
static struct dpll_pin *
dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
const struct dpll_pin_properties *prop)
@ -441,20 +486,24 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX ||
prop->type > DPLL_PIN_TYPE_MAX)) {
ret = -EINVAL;
goto err;
goto err_pin_prop;
}
pin->prop = prop;
ret = dpll_pin_prop_dup(prop, &pin->prop);
if (ret)
goto err_pin_prop;
refcount_set(&pin->refcount, 1);
xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
&dpll_pin_xa_id, GFP_KERNEL);
if (ret)
goto err;
goto err_xa_alloc;
return pin;
err:
err_xa_alloc:
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
dpll_pin_prop_free(&pin->prop);
err_pin_prop:
kfree(pin);
return ERR_PTR(ret);
}
@ -514,6 +563,7 @@ void dpll_pin_put(struct dpll_pin *pin)
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
xa_erase(&dpll_pin_xa, pin->id);
dpll_pin_prop_free(&pin->prop);
kfree(pin);
}
mutex_unlock(&dpll_lock);
@ -564,8 +614,6 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
WARN_ON(!ops->state_on_dpll_get) ||
WARN_ON(!ops->direction_get))
return -EINVAL;
if (ASSERT_DPLL_REGISTERED(dpll))
return -EINVAL;
mutex_lock(&dpll_lock);
if (WARN_ON(!(dpll->module == pin->module &&
@ -636,15 +684,13 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
unsigned long i, stop;
int ret;
if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX))
if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX))
return -EINVAL;
if (WARN_ON(!ops) ||
WARN_ON(!ops->state_on_pin_get) ||
WARN_ON(!ops->direction_get))
return -EINVAL;
if (ASSERT_PIN_REGISTERED(parent))
return -EINVAL;
mutex_lock(&dpll_lock);
ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);

View File

@ -44,7 +44,7 @@ struct dpll_device {
* @module: module of creator
* @dpll_refs: hold references to dplls pin was registered with
* @parent_refs: hold references to parent pins pin was registered with
* @prop: pointer to pin properties given by registerer
* @prop: pin properties copied from the registerer
* @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
**/
@ -55,7 +55,7 @@ struct dpll_pin {
struct module *module;
struct xarray dpll_refs;
struct xarray parent_refs;
const struct dpll_pin_properties *prop;
struct dpll_pin_properties prop;
refcount_t refcount;
};

View File

@ -303,17 +303,17 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq,
DPLL_A_PIN_PAD))
return -EMSGSIZE;
for (fs = 0; fs < pin->prop->freq_supported_num; fs++) {
for (fs = 0; fs < pin->prop.freq_supported_num; fs++) {
nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED);
if (!nest)
return -EMSGSIZE;
freq = pin->prop->freq_supported[fs].min;
freq = pin->prop.freq_supported[fs].min;
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq),
&freq, DPLL_A_PIN_PAD)) {
nla_nest_cancel(msg, nest);
return -EMSGSIZE;
}
freq = pin->prop->freq_supported[fs].max;
freq = pin->prop.freq_supported[fs].max;
if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq),
&freq, DPLL_A_PIN_PAD)) {
nla_nest_cancel(msg, nest);
@ -329,9 +329,9 @@ static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
{
int fs;
for (fs = 0; fs < pin->prop->freq_supported_num; fs++)
if (freq >= pin->prop->freq_supported[fs].min &&
freq <= pin->prop->freq_supported[fs].max)
for (fs = 0; fs < pin->prop.freq_supported_num; fs++)
if (freq >= pin->prop.freq_supported[fs].min &&
freq <= pin->prop.freq_supported[fs].max)
return true;
return false;
}
@ -421,7 +421,7 @@ static int
dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
struct netlink_ext_ack *extack)
{
const struct dpll_pin_properties *prop = pin->prop;
const struct dpll_pin_properties *prop = &pin->prop;
struct dpll_pin_ref *ref;
int ret;
@ -553,6 +553,24 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
}
static bool dpll_pin_available(struct dpll_pin *pin)
{
struct dpll_pin_ref *par_ref;
unsigned long i;
if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
return false;
xa_for_each(&pin->parent_refs, i, par_ref)
if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
DPLL_REGISTERED))
return true;
xa_for_each(&pin->dpll_refs, i, par_ref)
if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
DPLL_REGISTERED))
return true;
return false;
}
/**
* dpll_device_change_ntf - notify that the dpll device has been changed
* @dpll: registered dpll pointer
@ -579,7 +597,7 @@ dpll_pin_event_send(enum dpll_cmd event, struct dpll_pin *pin)
int ret = -ENOMEM;
void *hdr;
if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)))
if (!dpll_pin_available(pin))
return -ENODEV;
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
@ -717,7 +735,7 @@ dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
int ret;
if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
pin->prop->capabilities)) {
pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "state changing is not allowed");
return -EOPNOTSUPP;
}
@ -753,7 +771,7 @@ dpll_pin_state_set(struct dpll_device *dpll, struct dpll_pin *pin,
int ret;
if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
pin->prop->capabilities)) {
pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "state changing is not allowed");
return -EOPNOTSUPP;
}
@ -780,7 +798,7 @@ dpll_pin_prio_set(struct dpll_device *dpll, struct dpll_pin *pin,
int ret;
if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE &
pin->prop->capabilities)) {
pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "prio changing is not allowed");
return -EOPNOTSUPP;
}
@ -808,7 +826,7 @@ dpll_pin_direction_set(struct dpll_pin *pin, struct dpll_device *dpll,
int ret;
if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE &
pin->prop->capabilities)) {
pin->prop.capabilities)) {
NL_SET_ERR_MSG(extack, "direction changing is not allowed");
return -EOPNOTSUPP;
}
@ -838,8 +856,8 @@ dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
int ret;
phase_adj = nla_get_s32(phase_adj_attr);
if (phase_adj > pin->prop->phase_range.max ||
phase_adj < pin->prop->phase_range.min) {
if (phase_adj > pin->prop.phase_range.max ||
phase_adj < pin->prop.phase_range.min) {
NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
"phase adjust value not supported");
return -EINVAL;
@ -1023,7 +1041,7 @@ dpll_pin_find(u64 clock_id, struct nlattr *mod_name_attr,
unsigned long i;
xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) {
prop = pin->prop;
prop = &pin->prop;
cid_match = clock_id ? pin->clock_id == clock_id : true;
mod_match = mod_name_attr && module_name(pin->module) ?
!nla_strcmp(mod_name_attr,
@ -1130,6 +1148,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}
pin = dpll_pin_find_from_nlattr(info);
if (!IS_ERR(pin)) {
if (!dpll_pin_available(pin)) {
nlmsg_free(msg);
return -ENODEV;
}
ret = dpll_msg_add_pin_handle(msg, pin);
if (ret) {
nlmsg_free(msg);
@ -1179,6 +1201,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
ctx->idx) {
if (!dpll_pin_available(pin))
continue;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
&dpll_nl_family, NLM_F_MULTI,
@ -1441,7 +1465,8 @@ int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
}
info->user_ptr[0] = xa_load(&dpll_pin_xa,
nla_get_u32(info->attrs[DPLL_A_PIN_ID]));
if (!info->user_ptr[0]) {
if (!info->user_ptr[0] ||
!dpll_pin_available(info->user_ptr[0])) {
NL_SET_ERR_MSG(info->extack, "pin not found");
ret = -ENODEV;
goto unlock_dev;

View File

@ -195,7 +195,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
bool use_dma_alloc, bool use_dma32)
{
struct ttm_global *glob = &ttm_glob;
int ret;
int ret, nid;
if (WARN_ON(vma_manager == NULL))
return -EINVAL;
@ -215,7 +215,12 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
ttm_sys_man_init(bdev);
ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32);
if (dev)
nid = dev_to_node(dev);
else
nid = NUMA_NO_NODE;
ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);

View File

@ -100,4 +100,5 @@ static void __exit ns8390_module_exit(void)
module_init(ns8390_module_init);
module_exit(ns8390_module_exit);
#endif /* MODULE */
MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
MODULE_LICENSE("GPL");

View File

@ -102,4 +102,5 @@ static void __exit NS8390p_cleanup_module(void)
module_init(NS8390p_init_module);
module_exit(NS8390p_cleanup_module);
MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
MODULE_LICENSE("GPL");

View File

@ -610,4 +610,5 @@ static int init_pcmcia(void)
return 1;
}
MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -270,4 +270,5 @@ static void __exit hydra_cleanup_module(void)
module_init(hydra_init_module);
module_exit(hydra_cleanup_module);
MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -296,4 +296,5 @@ static void __exit stnic_cleanup(void)
module_init(stnic_probe);
module_exit(stnic_cleanup);
MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -443,4 +443,5 @@ static void __exit zorro8390_cleanup_module(void)
module_init(zorro8390_init_module);
module_exit(zorro8390_cleanup_module);
MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -793,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
};
module_platform_driver(bcm4908_enet_driver);
MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);

View File

@ -260,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
MODULE_AUTHOR("Rafał Miłecki");
MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
MODULE_LICENSE("GPL");

View File

@ -362,4 +362,5 @@ module_init(bgmac_init)
module_exit(bgmac_exit)
MODULE_AUTHOR("Rafał Miłecki");
MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
MODULE_LICENSE("GPL");

View File

@ -298,4 +298,5 @@ static struct platform_driver bgmac_enet_driver = {
};
module_platform_driver(bgmac_enet_driver);
MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
MODULE_LICENSE("GPL");

View File

@ -1626,4 +1626,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
EXPORT_SYMBOL_GPL(bgmac_enet_resume);
MODULE_AUTHOR("Rafał Miłecki");
MODULE_DESCRIPTION("Broadcom iProc GBit driver");
MODULE_LICENSE("GPL");

View File

@ -3817,7 +3817,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
int i, j, rc, ulp_base_vec, ulp_msix;
int tcs = netdev_get_num_tc(bp->dev);
int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
@ -5935,8 +5935,12 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
if (!rx_rings)
return 0;
return bnxt_calc_nr_ring_pages(rx_rings - 1,
BNXT_RSS_TABLE_ENTRIES_P5);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return 2;
return 1;
@ -6926,7 +6930,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
if (cp < (rx + tx)) {
rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
if (rc)
return rc;
goto get_rings_exit;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
hw_resc->resv_rx_rings = rx;
@ -6938,8 +6942,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_cp_rings = cp;
hw_resc->resv_stat_ctxs = stats;
}
get_rings_exit:
hwrm_req_drop(bp, req);
return 0;
return rc;
}
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@ -7000,10 +7005,11 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req->num_rx_rings = cpu_to_le16(rx_rings);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req->num_msix = cpu_to_le16(cp_rings);
req->num_rsscos_ctxs =
cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@ -7050,8 +7056,10 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req->num_tx_rings = cpu_to_le16(tx_rings);
req->num_rx_rings = cpu_to_le16(rx_rings);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
} else {
req->num_cmpl_rings = cpu_to_le16(cp_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@ -9938,7 +9946,7 @@ static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
{
int tcs = netdev_get_num_tc(bp->dev);
int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
@ -9947,7 +9955,7 @@ int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
{
int tcs = netdev_get_num_tc(bp->dev);
int tcs = bp->num_tc;
return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
bp->tx_nr_rings_xdp;
@ -9977,7 +9985,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
struct net_device *dev = bp->dev;
int tcs, i;
tcs = netdev_get_num_tc(dev);
tcs = bp->num_tc;
if (tcs) {
int i, off, count;
@ -10009,8 +10017,10 @@ static void bnxt_setup_inta(struct bnxt *bp)
{
const int len = sizeof(bp->irq_tbl[0].name);
if (netdev_get_num_tc(bp->dev))
if (bp->num_tc) {
netdev_reset_tc(bp->dev);
bp->num_tc = 0;
}
snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
0);
@ -10236,8 +10246,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
int tcs = netdev_get_num_tc(bp->dev);
bool irq_cleared = false;
int tcs = bp->num_tc;
int rc;
if (!bnxt_need_reserve_rings(bp))
@ -10263,6 +10273,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
bp->num_tc = 0;
if (bp->tx_nr_rings_xdp)
bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
else
@ -11564,10 +11575,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
bnxt_init_napi(bp);
set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
rc = bnxt_init_nic(bp, true);
if (rc) {
clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
bnxt_del_napi(bp);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@ -11586,6 +11599,7 @@ half_open_err:
void bnxt_half_close_nic(struct bnxt *bp)
{
bnxt_hwrm_resource_free(bp, false, true);
bnxt_del_napi(bp);
bnxt_free_skbs(bp);
bnxt_free_mem(bp, true);
clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
@ -13232,6 +13246,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
bp->fw_cap = 0;
rc = bnxt_hwrm_ver_get(bp);
/* FW may be unresponsive after FLR. FLR must complete within 100 msec
* so wait before continuing with recovery.
*/
if (rc)
msleep(100);
bnxt_try_map_fw_health_reg(bp);
if (rc) {
rc = bnxt_try_recover_fw(bp);
@ -13784,7 +13803,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
if (netdev_get_num_tc(dev) == tc)
if (bp->num_tc == tc)
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@ -13802,9 +13821,11 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (tc) {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
netdev_set_num_tc(dev, tc);
bp->num_tc = tc;
} else {
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
bp->num_tc = 0;
}
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);

View File

@ -2225,6 +2225,7 @@ struct bnxt {
u8 tc_to_qidx[BNXT_MAX_QUEUE];
u8 q_ids[BNXT_MAX_QUEUE];
u8 max_q;
u8 num_tc;
unsigned int current_interval;
#define BNXT_TIMER_INTERVAL HZ

View File

@ -228,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
}
}
if (bp->ieee_ets) {
int tc = netdev_get_num_tc(bp->dev);
int tc = bp->num_tc;
if (!tc)
tc = 1;

View File

@ -884,7 +884,7 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
tcs = netdev_get_num_tc(dev);
tcs = bp->num_tc;
tx_grps = max(tcs, 1);
if (bp->tx_nr_rings_xdp)
tx_grps++;
@ -944,7 +944,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (channel->combined_count)
sh = true;
tcs = netdev_get_num_tc(dev);
tcs = bp->num_tc;
req_tx_rings = sh ? channel->combined_count : channel->tx_count;
req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@ -1574,7 +1574,8 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
BNXT_RSS_TABLE_ENTRIES_P5;
return HW_HASH_INDEX_SIZE;
}

View File

@ -407,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog)
tx_xdp = bp->rx_nr_rings;
tc = netdev_get_num_tc(dev);
tc = bp->num_tc;
if (!tc)
tc = 1;
rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,

View File

@ -27,6 +27,7 @@
#include "octeon_network.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
MODULE_LICENSE("GPL");
/* OOM task polling interval */

View File

@ -868,5 +868,6 @@ static struct platform_driver ep93xx_eth_driver = {
module_platform_driver(ep93xx_eth_driver);
MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");

View File

@ -1485,7 +1485,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
xdp_prepare_buff(&xdp, page_address(entry->page),
XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
length, false);
length - ETH_FCS_LEN, false);
consume = tsnep_xdp_run_prog(rx, prog, &xdp,
&xdp_status, tx_nq, tx);
@ -1568,7 +1568,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
prefetch(entry->xdp->data);
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
xsk_buff_set_size(entry->xdp, length);
xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
/* RX metadata with timestamps is in front of actual data,
@ -1762,6 +1762,19 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
allocated--;
}
}
/* set need wakeup flag immediately if ring is not filled completely,
* first polling would be too late as need wakeup signalling would
* be delayed for an indefinite time
*/
if (xsk_uses_need_wakeup(rx->xsk_pool)) {
int desc_available = tsnep_rx_desc_available(rx);
if (desc_available)
xsk_set_rx_need_wakeup(rx->xsk_pool);
else
xsk_clear_rx_need_wakeup(rx->xsk_pool);
}
}
static bool tsnep_pending(struct tsnep_queue *queue)

View File

@ -661,4 +661,5 @@ static struct platform_driver nps_enet_driver = {
module_platform_driver(nps_enet_driver);
MODULE_AUTHOR("EZchip Semiconductor");
MODULE_DESCRIPTION("EZchip NPS Ethernet driver");
MODULE_LICENSE("GPL v2");

View File

@ -3216,4 +3216,5 @@ void enetc_pci_remove(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(enetc_pci_remove);
MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");

View File

@ -2036,6 +2036,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
/* if any of the above changed restart the FEC */
if (status_change) {
netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
@ -2045,6 +2046,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
}
} else {
if (fep->link) {
netif_stop_queue(ndev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_stop(ndev);
@ -4769,4 +4771,5 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
MODULE_LICENSE("GPL");

View File

@ -531,4 +531,5 @@ static struct platform_driver fsl_pq_mdio_driver = {
module_platform_driver(fsl_pq_mdio_driver);
MODULE_DESCRIPTION("Freescale PQ MDIO helpers");
MODULE_LICENSE("GPL");

View File

@ -3588,40 +3588,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
struct i40e_hmc_obj_rxq rx_ctx;
int err = 0;
bool ok;
int ret;
bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(rx_ctx));
if (ring->vsi->type == I40E_VSI_MAIN)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len = vsi->rx_buf_len;
/* XDP RX-queue info only needed for RX rings exposed to XDP */
if (ring->vsi->type != I40E_VSI_MAIN)
goto skip;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->queue_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
return err;
}
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
xdp_rxq_info_unreg(&ring->xdp_rxq);
ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->queue_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
if (ret)
return ret;
if (err)
return err;
dev_info(&vsi->back->pdev->dev,
"Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->queue_index);
} else {
ring->rx_buf_len = vsi->rx_buf_len;
if (ring->vsi->type == I40E_VSI_MAIN) {
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
NULL);
if (ret)
return ret;
}
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
NULL);
if (err)
return err;
}
skip:
xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,

View File

@ -1548,7 +1548,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int err;
u64_stats_init(&rx_ring->syncp);
@ -1569,14 +1568,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_process = 0;
rx_ring->next_to_use = 0;
/* XDP RX-queue info only needed for RX rings exposed to XDP */
if (rx_ring->vsi->type == I40E_VSI_MAIN) {
err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
if (err < 0)
return err;
}
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
rx_ring->rx_bi =
@ -2087,7 +2078,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
struct xdp_buff *xdp)
{
u32 next = rx_ring->next_to_clean;
u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
u32 next = rx_ring->next_to_clean, i = 0;
struct i40e_rx_buffer *rx_buffer;
xdp->flags = 0;
@ -2100,10 +2092,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
if (!rx_buffer->page)
continue;
if (xdp_res == I40E_XDP_CONSUMED)
rx_buffer->pagecnt_bias++;
else
if (xdp_res != I40E_XDP_CONSUMED)
i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
else if (i++ <= nr_frags)
rx_buffer->pagecnt_bias++;
/* EOP buffer will be put in i40e_clean_rx_irq() */
if (next == rx_ring->next_to_process)
@ -2117,20 +2109,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
* i40e_construct_skb - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
* @nr_frags: number of buffers for the packet
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
* skb correctly.
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct xdp_buff *xdp,
u32 nr_frags)
struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
struct i40e_rx_buffer *rx_buffer;
struct skb_shared_info *sinfo;
unsigned int headlen;
struct sk_buff *skb;
u32 nr_frags = 0;
/* prefetch first cache line of first page */
net_prefetch(xdp->data);
@ -2168,6 +2160,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(__skb_put(skb, headlen), xdp->data,
ALIGN(headlen, sizeof(long)));
if (unlikely(xdp_buff_has_frags(xdp))) {
sinfo = xdp_get_shared_info_from_buff(xdp);
nr_frags = sinfo->nr_frags;
}
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
/* update all of the pointers */
size -= headlen;
@ -2187,9 +2183,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
}
if (unlikely(xdp_buff_has_frags(xdp))) {
struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
struct skb_shared_info *skinfo = skb_shinfo(skb);
sinfo = xdp_get_shared_info_from_buff(xdp);
memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
sizeof(skb_frag_t) * nr_frags);
@ -2212,17 +2207,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
* @nr_frags: number of buffers for the packet
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
struct xdp_buff *xdp,
u32 nr_frags)
struct xdp_buff *xdp)
{
unsigned int metasize = xdp->data - xdp->data_meta;
struct skb_shared_info *sinfo;
struct sk_buff *skb;
u32 nr_frags;
/* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly as xdp->data, otherwise we
@ -2231,6 +2226,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
*/
net_prefetch(xdp->data_meta);
if (unlikely(xdp_buff_has_frags(xdp))) {
sinfo = xdp_get_shared_info_from_buff(xdp);
nr_frags = sinfo->nr_frags;
}
/* build an skb around the page buffer */
skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
@ -2243,9 +2243,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize);
if (unlikely(xdp_buff_has_frags(xdp))) {
struct skb_shared_info *sinfo;
sinfo = xdp_get_shared_info_from_buff(xdp);
xdp_update_skb_shared_info(skb, nr_frags,
sinfo->xdp_frags_size,
nr_frags * xdp->frame_sz,
@ -2589,9 +2586,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
total_rx_bytes += size;
} else {
if (ring_uses_build_skb(rx_ring))
skb = i40e_build_skb(rx_ring, xdp, nfrags);
skb = i40e_build_skb(rx_ring, xdp);
else
skb = i40e_construct_skb(rx_ring, xdp, nfrags);
skb = i40e_construct_skb(rx_ring, xdp);
/* drop if we failed to retrieve a buffer */
if (!skb) {

View File

@ -414,7 +414,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
virt_to_page(xdp->data_hard_start), 0, size);
virt_to_page(xdp->data_hard_start),
XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
@ -498,7 +499,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
&rx_bytes, xdp_res, &failure);
first->flags = 0;
next_to_clean = next_to_process;
if (failure)
break;

View File

@ -547,19 +547,27 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
ring->rx_buf_len = ring->vsi->rx_buf_len;
if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
__xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->vsi->rx_buf_len);
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
return err;
}
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
xdp_rxq_info_unreg(&ring->xdp_rxq);
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
@ -571,13 +579,14 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
__xdp_rxq_info_reg(&ring->xdp_rxq,
ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->vsi->rx_buf_len);
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
return err;
}
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,

View File

@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
if (rx_ring->vsi->type == ICE_VSI_PF &&
!xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
goto err;
return 0;
err:
@ -603,9 +598,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
rx_buf->act = ret;
if (unlikely(xdp_buff_has_frags(xdp)))
ice_set_rx_bufs_act(xdp, rx_ring, ret);
ice_set_rx_bufs_act(xdp, rx_ring, ret);
}
/**
@ -893,14 +886,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
}
if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
if (unlikely(xdp_buff_has_frags(xdp)))
ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
return -ENOMEM;
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
sinfo->xdp_frags_size += size;
/* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
* can pop off frags but driver has to handle it on its own
*/
rx_ring->nr_frags = sinfo->nr_frags;
if (page_is_pfmemalloc(rx_buf->page))
xdp_buff_set_frag_pfmemalloc(xdp);
@ -1251,6 +1247,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp->data = NULL;
rx_ring->first_desc = ntc;
rx_ring->nr_frags = 0;
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@ -1266,10 +1263,12 @@ construct_skb:
ICE_XDP_CONSUMED);
xdp->data = NULL;
rx_ring->first_desc = ntc;
rx_ring->nr_frags = 0;
break;
}
xdp->data = NULL;
rx_ring->first_desc = ntc;
rx_ring->nr_frags = 0;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,

View File

@ -358,6 +358,7 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
u32 nr_frags;
dma_addr_t dma; /* physical address of ring */
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */

View File

@ -12,26 +12,39 @@
* act: action to store onto Rx buffers related to XDP buffer parts
*
* Set action that should be taken before putting Rx buffer from first frag
* to one before last. Last one is handled by caller of this function as it
* is the EOP frag that is currently being processed. This function is
* supposed to be called only when XDP buffer contains frags.
* to the last.
*/
static inline void
ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
const unsigned int act)
{
const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
u32 first = rx_ring->first_desc;
u32 nr_frags = sinfo->nr_frags;
u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
u32 nr_frags = rx_ring->nr_frags + 1;
u32 idx = rx_ring->first_desc;
u32 cnt = rx_ring->count;
struct ice_rx_buf *buf;
for (int i = 0; i < nr_frags; i++) {
buf = &rx_ring->rx_buf[first];
buf = &rx_ring->rx_buf[idx];
buf->act = act;
if (++first == cnt)
first = 0;
if (++idx == cnt)
idx = 0;
}
/* adjust pagecnt_bias on frags freed by XDP prog */
if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
u32 delta = rx_ring->nr_frags - sinfo_frags;
while (delta) {
if (idx == 0)
idx = cnt - 1;
else
idx--;
buf = &rx_ring->rx_buf[idx];
buf->pagecnt_bias--;
delta--;
}
}
}

View File

@ -825,7 +825,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
}
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
virt_to_page(xdp->data_hard_start), 0, size);
virt_to_page(xdp->data_hard_start),
XDP_PACKET_HEADROOM, size);
sinfo->xdp_frags_size += size;
xsk_buff_add_frag(xdp);
@ -895,7 +896,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
if (!first) {
first = xdp;
xdp_buff_clear_frags_flag(first);
} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
break;
}

View File

@ -783,6 +783,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
netdev->dev_port = idx;
/* configure default MTU size */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = vport->max_mtu;

View File

@ -318,4 +318,5 @@ static struct platform_driver liteeth_driver = {
module_platform_driver(liteeth_driver);
MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver");
MODULE_LICENSE("GPL");

View File

@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}
/* Cleanup pool before actual initialization in the OS */
static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
{
unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
u32 val;
int i;
/* Drain the BM from all possible residues left by firmware */
for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
put_cpu();
/* Stop the BM pool */
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
val |= MVPP2_BM_STOP_MASK;
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
}
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
int i, err, poolnum = MVPP2_BM_POOLS_NUM;
struct mvpp2_port *port;
if (priv->percpu_pools)
poolnum = mvpp2_get_nrxqs(priv) * 2;
/* Clean up the pool state in case it contains stale state */
for (i = 0; i < poolnum; i++)
mvpp2_bm_pool_cleanup(priv, i);
if (priv->percpu_pools) {
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
}
}
poolnum = mvpp2_get_nrxqs(priv) * 2;
for (i = 0; i < poolnum; i++) {
/* the pool in use */
int pn = i / (poolnum / 2);

View File

@ -413,4 +413,5 @@ const char *otx2_mbox_id2name(u16 id)
EXPORT_SYMBOL(otx2_mbox_id2name);
MODULE_AUTHOR("Marvell.");
MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
MODULE_LICENSE("GPL v2");

View File

@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
{
const char *namep = mlx5_command_str(opcode);
struct mlx5_cmd_stats *stats;
unsigned long flags;
if (!err || !(strcmp(namep, "unknown command opcode")))
return;
@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats = xa_load(&dev->cmd.stats, opcode);
if (!stats)
return;
spin_lock_irq(&stats->lock);
spin_lock_irqsave(&stats->lock, flags);
stats->failed++;
if (err < 0)
stats->last_failed_errno = -err;
@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
stats->last_failed_mbox_status = status;
stats->last_failed_syndrome = syndrome;
}
spin_unlock_irq(&stats->lock);
spin_unlock_irqrestore(&stats->lock, flags);
}
/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */

View File

@ -1124,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
bool enable_mc_lb);

View File

@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
ft->g = NULL;
kvfree(in);
return -ENOMEM;
}

View File

@ -1064,8 +1064,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
bool allow_swp;
allow_swp =
mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
allow_swp = mlx5_geneve_tx_allowed(mdev) ||
(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
mlx5e_build_sq_param_common(mdev, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);

View File

@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
md_buff[*md_buff_sz++] = metadata_id;
md_buff[(*md_buff_sz)++] = metadata_id;
if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);

View File

@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
/* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
attrs->dir = x->xso.dir;
/* esn */
if (x->props.flags & XFRM_STATE_ESN) {
attrs->replay_esn.trigger = true;
attrs->replay_esn.esn = sa_entry->esn_state.esn;
attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
goto skip_replay_window;
switch (x->replay_esn->replay_window) {
case 32:
attrs->replay_esn.replay_window =
@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
}
}
attrs->dir = x->xso.dir;
skip_replay_window:
/* spi */
attrs->spi = be32_to_cpu(x->id.spi);
@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
if (x->replay_esn && x->replay_esn->replay_window != 32 &&
if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
x->replay_esn->replay_window != 32 &&
x->replay_esn->replay_window != 64 &&
x->replay_esn->replay_window != 128 &&
x->replay_esn->replay_window != 256) {

View File

@ -254,11 +254,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
kfree(ft->g);
kvfree(in);
if (!ft->g)
return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free_g;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
@ -278,7 +280,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
goto out;
goto err_free_in;
}
switch (type) {
@ -300,7 +302,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
break;
default:
err = -EINVAL;
goto out;
goto err_free_in;
}
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@ -309,7 +311,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err;
goto err_clean_group;
ft->num_groups++;
memset(in, 0, inlen);
@ -318,18 +320,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err;
goto err_clean_group;
ft->num_groups++;
kvfree(in);
return 0;
err:
err_clean_group:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
out:
err_free_in:
kvfree(in);
err_free_g:
kfree(ft->g);
ft->g = NULL;
return err;
}

View File

@ -95,7 +95,7 @@ static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PO
{
int tc, i;
for (i = 0; i < MLX5_MAX_PORTS; i++)
for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
mlx5e_destroy_tis(mdev, tisn[i][tc]);
}
@ -110,7 +110,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT
int tc, i;
int err;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc;
@ -140,7 +140,7 @@ err_close_tises:
return err;
}
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
int err;
@ -169,11 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
goto err_destroy_mkey;
}
err = mlx5e_create_tises(mdev, res->tisn);
if (err) {
mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
goto err_destroy_bfreg;
if (create_tises) {
err = mlx5e_create_tises(mdev, res->tisn);
if (err) {
mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
goto err_destroy_bfreg;
}
res->tisn_valid = true;
}
INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock);
@ -203,7 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
mdev->mlx5e_res.dek_priv = NULL;
mlx5e_destroy_tises(mdev, res->tisn);
if (res->tisn_valid)
mlx5e_destroy_tises(mdev, res->tisn);
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);

View File

@ -5992,7 +5992,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
if (netif_device_present(netdev))
return 0;
err = mlx5e_create_mdev_resources(mdev);
err = mlx5e_create_mdev_resources(mdev, true);
if (err)
return err;

View File

@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
err = mlx5e_rss_params_indir_init(&indir, mdev,
mlx5e_rqt_size(mdev, hp->num_channels),
mlx5e_rqt_size(mdev, priv->max_nch));
mlx5e_rqt_size(mdev, hp->num_channels));
if (err)
return err;
@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
continue;
list_del(&peer_flow->peer_flows);
if (refcount_dec_and_test(&peer_flow->refcnt)) {
mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
list_del(&peer_flow->peer_flows);
kfree(peer_flow);
}
}

View File

@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
i++;
}
rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
ether_addr_copy(dmac_v, entry->key.addr);
@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec)
return ERR_PTR(-ENOMEM);
rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id;
}
rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
kvfree(rule_spec);

View File

@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
fte->flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
!!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);

View File

@ -783,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
}
/* This should only be called once per mdev */
err = mlx5e_create_mdev_resources(mdev);
err = mlx5e_create_mdev_resources(mdev, false);
if (err)
goto destroy_ht;
}

View File

@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -

View File

@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
switch (action_type) {
case DR_ACTION_TYP_DROP:
attr.final_icm_addr = nic_dmn->drop_icm_addr;
attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
break;
case DR_ACTION_TYP_FT:
dest_action = action;
@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action->sampler->tx_icm_addr;
break;
case DR_ACTION_TYP_VPORT:
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
attr.final_icm_addr = rx_rule ?
action->vport->caps->icm_address_rx :
action->vport->caps->icm_address_tx;
if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
/* can't go to uplink on RX rule - dropping instead */
attr.final_icm_addr = nic_dmn->drop_icm_addr;
attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
} else {
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
attr.final_icm_addr = rx_rule ?
action->vport->caps->icm_address_rx :
action->vport->caps->icm_address_tx;
}
break;
case DR_ACTION_TYP_POP_VLAN:
if (!rx_rule && !(dmn->ste_ctx->actions_caps &

View File

@ -440,6 +440,27 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
{
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
u32 *out;
int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
err = mlx5_query_nic_vport_context(mdev, 0, out);
if (err)
goto out;
*sd_group = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.sd_group);
out:
kvfree(out);
return err;
}
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;

View File

@ -7542,6 +7542,9 @@ int stmmac_dvr_probe(struct device *device,
dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
ERR_PTR(ret));
/* Wait a bit for the reset to take effect */
udelay(10);
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)

View File

@ -221,21 +221,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
if (!(hw->hw_info.req_buf))
return -ENOMEM;
if (!(hw->hw_info.req_buf)) {
result = -ENOMEM;
goto free_ep_info;
}
hw->hw_info.req_buf_size = mem_size;
mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
if (!(hw->hw_info.res_buf))
return -ENOMEM;
if (!(hw->hw_info.res_buf)) {
result = -ENOMEM;
goto free_req_buf;
}
hw->hw_info.res_buf_size = mem_size;
result = fjes_hw_alloc_shared_status_region(hw);
if (result)
return result;
goto free_res_buf;
hw->hw_info.buffer_share_bit = 0;
hw->hw_info.buffer_unshare_reserve_bit = 0;
@ -246,11 +250,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
result = fjes_hw_alloc_epbuf(&buf_pair->tx);
if (result)
return result;
goto free_epbuf;
result = fjes_hw_alloc_epbuf(&buf_pair->rx);
if (result)
return result;
goto free_epbuf;
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, mac,
@ -273,6 +277,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
fjes_hw_init_command_registers(hw, &param);
return 0;
free_epbuf:
for (epidx = 0; epidx < hw->max_epid ; epidx++) {
if (epidx == hw->my_epid)
continue;
fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
}
fjes_hw_free_shared_status_region(hw);
free_res_buf:
kfree(hw->hw_info.res_buf);
hw->hw_info.res_buf = NULL;
free_req_buf:
kfree(hw->hw_info.req_buf);
hw->hw_info.req_buf = NULL;
free_ep_info:
kfree(hw->ep_shm_info);
hw->ep_shm_info = NULL;
return result;
}
static void fjes_hw_cleanup(struct fjes_hw *hw)
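
The fjes fix above replaces early returns, which leaked everything allocated before the failing step, with a goto ladder that unwinds in reverse order of allocation. A minimal userspace sketch of the pattern; the names and sizes are illustrative, not taken from the driver:

#include <stdlib.h>

struct ctx { void *req_buf, *res_buf; };

static int setup(struct ctx *c)
{
	int result = 0;

	c->req_buf = calloc(1, 64);
	if (!c->req_buf)
		return -1;		/* nothing to unwind yet */

	c->res_buf = calloc(1, 64);
	if (!c->res_buf) {
		result = -1;
		goto free_req_buf;	/* unwind only what exists */
	}
	return 0;

free_req_buf:
	free(c->req_buf);
	c->req_buf = NULL;		/* match the driver: NULL after free */
	return result;
}

int main(void)
{
	struct ctx c = { 0 };
	int ret = setup(&c);

	free(c.res_buf);
	free(c.req_buf);
	return ret ? 1 : 0;
}

Each label undoes exactly one step and falls through to the cleanups before it, so a failure at any point releases precisely what was acquired.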


@ -44,7 +44,7 @@
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
@ -2807,7 +2807,7 @@ static int __init netvsc_drv_init(void)
pr_info("Increased ring_size to %u (min allowed)\n",
ring_size);
}
netvsc_ring_bytes = ring_size * PAGE_SIZE;
netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
register_netdevice_notifier(&netvsc_netdev_notifier);


@ -607,11 +607,26 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-EINVAL);
}
ret = skb_ensure_writable_head_tail(skb, dev);
if (unlikely(ret < 0)) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(ret);
if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
struct sk_buff *nskb = skb_copy_expand(skb,
MACSEC_NEEDED_HEADROOM,
MACSEC_NEEDED_TAILROOM,
GFP_ATOMIC);
if (likely(nskb)) {
consume_skb(skb);
skb = nskb;
} else {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
} else {
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb) {
macsec_txsa_put(tx_sa);
return ERR_PTR(-ENOMEM);
}
}
unprotected_len = skb->len;
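
The macsec revert restores the two-path room check: an skb that lacks MACsec headroom or tailroom is copy-expanded, anything else is only unshared before encryption. A loose userspace analogy of the ensure-room step, with a flat buffer standing in for the skb and invented sizes:

#include <stdlib.h>
#include <string.h>

#define NEEDED_HEADROOM 32
#define NEEDED_TAILROOM 16

struct buf {
	unsigned char *data;	/* start of payload */
	size_t len;		/* payload length */
	size_t head;		/* free bytes before data */
	size_t tail;		/* free bytes after data + len */
};

static int ensure_room(struct buf *b)
{
	if (b->head >= NEEDED_HEADROOM && b->tail >= NEEDED_TAILROOM)
		return 0;	/* enough room already */

	unsigned char *n = malloc(NEEDED_HEADROOM + b->len + NEEDED_TAILROOM);
	if (!n)
		return -1;	/* caller drops the packet */

	memcpy(n + NEEDED_HEADROOM, b->data, b->len);
	free(b->data - b->head);
	b->data = n + NEEDED_HEADROOM;
	b->head = NEEDED_HEADROOM;
	b->tail = NEEDED_TAILROOM;
	return 0;
}

int main(void)
{
	struct buf b = { 0 };

	b.data = malloc(8);
	if (!b.data)
		return 1;
	b.len = 8;
	memset(b.data, 0xab, b.len);	/* head = tail = 0: must expand */

	if (ensure_room(&b))
		return 1;
	free(b.data - b.head);
	return 0;
}

On allocation failure the caller drops the packet, mirroring the kfree_skb()/-ENOMEM path above.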


@ -120,6 +120,11 @@
*/
#define LAN8814_1PPM_FORMAT 17179
#define PTP_RX_VERSION 0x0248
#define PTP_TX_VERSION 0x0288
#define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
#define PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0))
#define PTP_RX_MOD 0x024F
#define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
#define PTP_RX_TIMESTAMP_EN 0x024D
@ -3150,6 +3155,12 @@ static void lan8814_ptp_init(struct phy_device *phydev)
lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
/* Disable checking for minorVersionPTP field */
lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
skb_queue_head_init(&ptp_priv->tx_queue);
skb_queue_head_init(&ptp_priv->rx_queue);
INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
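
The new PTP_MAX_VERSION()/PTP_MIN_VERSION() helpers pack the highest and lowest accepted versionPTP into the high and low byte of one register word; writing 0xff and 0x0 effectively disables the version check. A standalone check of the packing (GENMASK(7, 0) is simply 0xff):

#include <assert.h>
#include <stdint.h>

#define PTP_MAX_VERSION(x) (((x) & 0xffu) << 8)	/* high byte */
#define PTP_MIN_VERSION(x) ((x) & 0xffu)	/* low byte */

int main(void)
{
	uint16_t v = PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0);

	assert(v == 0xff00);	/* accept any version from 0 upward */
	return 0;
}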


@ -1630,13 +1630,19 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
switch (act) {
case XDP_REDIRECT:
err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
if (err)
if (err) {
dev_core_stats_rx_dropped_inc(tun->dev);
return err;
}
dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
break;
case XDP_TX:
err = tun_xdp_tx(tun->dev, xdp);
if (err < 0)
if (err < 0) {
dev_core_stats_rx_dropped_inc(tun->dev);
return err;
}
dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
break;
case XDP_PASS:
break;


@ -368,10 +368,6 @@ struct ath11k_vif {
struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
#ifdef CONFIG_ATH11K_DEBUGFS
struct dentry *debugfs_twt;
#endif /* CONFIG_ATH11K_DEBUGFS */
};
struct ath11k_vif_iter {


@ -1894,35 +1894,30 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = {
.open = simple_open
};
void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_base *ab = arvif->ar->ab;
struct dentry *debugfs_twt;
if (arvif->vif->type != NL80211_IFTYPE_AP &&
!(arvif->vif->type == NL80211_IFTYPE_STATION &&
test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
return;
arvif->debugfs_twt = debugfs_create_dir("twt",
arvif->vif->debugfs_dir);
debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
debugfs_twt = debugfs_create_dir("twt",
arvif->vif->debugfs_dir);
debugfs_create_file("add_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_add_dialog);
debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
debugfs_create_file("del_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_del_dialog);
debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
debugfs_create_file("pause_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_pause_dialog);
debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
debugfs_create_file("resume_dialog", 0200, debugfs_twt,
arvif, &ath11k_fops_twt_resume_dialog);
}
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
{
if (!arvif->debugfs_twt)
return;
debugfs_remove_recursive(arvif->debugfs_twt);
arvif->debugfs_twt = NULL;
}


@ -307,8 +307,8 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return ar->debug.rx_filter;
}
void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
enum ath11k_dbg_dbr_event event,
@ -387,14 +387,6 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
return 0;
}
static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
{
}
static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
{
}
static inline void
ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,


@ -6756,13 +6756,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err;
}
/* In the case of hardware recovery, debugfs files are
* not deleted since ieee80211_ops.remove_interface() is
* not invoked. In such cases, try to delete the files.
* These will be re-created later.
*/
ath11k_debugfs_remove_interface(arvif);
memset(arvif, 0, sizeof(*arvif));
arvif->ar = ar;
@ -6939,8 +6932,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ath11k_dp_vdev_tx_attach(ar, arvif);
ath11k_debugfs_add_interface(arvif);
if (vif->type != NL80211_IFTYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
ret = ath11k_mac_monitor_vdev_create(ar);
@ -7056,8 +7047,6 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
ath11k_debugfs_remove_interface(arvif);
/* TODO: recal traffic pause state based on the available vdevs */
mutex_unlock(&ar->conf_mutex);
@ -9153,6 +9142,7 @@ static const struct ieee80211_ops ath11k_ops = {
#endif
#ifdef CONFIG_ATH11K_DEBUGFS
.vif_add_debugfs = ath11k_debugfs_op_vif_add,
.sta_add_debugfs = ath11k_debugfs_sta_op_add,
#endif


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2018-2023 Intel Corporation
* Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@ -1096,7 +1096,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
node_trig = (void *)node_tlv->data;
}
memcpy(node_trig->data + offset, trig->data, trig_data_len);
memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
node_tlv->length = cpu_to_le32(size);
if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {


@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
"FW rev %s - Softmac protocol %x.%x\n",
fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
"%s - %x.%x", fw_version,
"%.19s - %x.%x", fw_version,
priv->fw_var >> 8, priv->fw_var & 0xff);
}
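
The "%.19s" precision caps how many bytes of fw_version are copied, so the trailing " - x.y" always fits in the fixed-size wiphy->fw_version buffer no matter how long the firmware string is. A quick illustration with an invented version string:

#include <stdio.h>

int main(void)
{
	char out[32];
	const char *fw_version = "2.13.12.0-extremely-long-build-tag";

	/* "%.19s" keeps at most 19 bytes of the string argument */
	snprintf(out, sizeof(out), "%.19s - %x.%x", fw_version, 0x2, 0x13);
	puts(out);	/* "2.13.12.0-extremely - 2.13" */
	return 0;
}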


@ -463,12 +463,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
}
for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
shinfo->nr_frags++, gop++, nr_slots--) {
nr_slots--) {
if (unlikely(!txp->size)) {
unsigned long flags;
spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
push_tx_responses(queue);
spin_unlock_irqrestore(&queue->response_lock, flags);
++txp;
continue;
}
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp,
txp == first ? extra_count : 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
++shinfo->nr_frags;
++gop;
if (txp == first)
txp = txfrags;
@ -481,20 +494,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
shinfo->nr_frags++, txp++, gop++) {
for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
if (unlikely(!txp->size)) {
unsigned long flags;
spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, 0,
XEN_NETIF_RSP_OKAY);
push_tx_responses(queue);
spin_unlock_irqrestore(&queue->response_lock,
flags);
continue;
}
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
++shinfo->nr_frags;
++gop;
}
skb_shinfo(skb)->frag_list = nskb;
} else if (nskb) {
if (shinfo->nr_frags) {
skb_shinfo(skb)->frag_list = nskb;
nskb = NULL;
}
}
if (nskb) {
/* A frag_list skb was allocated but it is no longer needed
* because enough slots were converted to copy ops above.
* because enough slots were converted to copy ops above or some
* were empty.
*/
kfree_skb(nskb);
}


@ -631,8 +631,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
if (logo_lines > vc->vc_bottom) {
logo_shown = FBCON_LOGO_CANSHOW;
printk(KERN_INFO
"fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
pr_info("fbcon: disable boot-logo (boot-logo bigger than screen).\n");
} else {
logo_shown = FBCON_LOGO_DRAW;
vc->vc_top = logo_lines;


@ -869,6 +869,9 @@ static int savagefb_check_var(struct fb_var_screeninfo *var,
DBG("savagefb_check_var");
if (!var->pixclock)
return -EINVAL;
var->transp.offset = 0;
var->transp.length = 0;
switch (var->bits_per_pixel) {


@ -1444,6 +1444,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
if (!var->pixclock)
return -EINVAL;
pixclock = var->pixclock;
if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {


@ -1158,7 +1158,7 @@ stifb_init_display(struct stifb_info *fb)
}
break;
}
stifb_blank(0, (struct fb_info *)fb); /* 0=enable screen */
stifb_blank(0, fb->info); /* 0=enable screen */
SETUP_FB(fb);
}


@ -374,7 +374,6 @@ static int vt8500lcd_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ defined\n");
ret = -ENODEV;
goto failed_free_palette;
}


@ -124,7 +124,7 @@ static void afs_dir_read_cleanup(struct afs_read *req)
if (xas_retry(&xas, folio))
continue;
BUG_ON(xa_is_value(folio));
ASSERTCMP(folio_file_mapping(folio), ==, mapping);
ASSERTCMP(folio->mapping, ==, mapping);
folio_put(folio);
}
@ -202,12 +202,12 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
if (xas_retry(&xas, folio))
continue;
BUG_ON(folio_file_mapping(folio) != mapping);
BUG_ON(folio->mapping != mapping);
size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));
for (offset = 0; offset < size; offset += sizeof(*block)) {
block = kmap_local_folio(folio, offset);
pr_warn("[%02lx] %32phN\n", folio_index(folio) + offset, block);
pr_warn("[%02lx] %32phN\n", folio->index + offset, block);
kunmap_local(block);
}
}
@ -233,7 +233,7 @@ static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
if (xas_retry(&xas, folio))
continue;
BUG_ON(folio_file_mapping(folio) != mapping);
BUG_ON(folio->mapping != mapping);
if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) {
afs_dir_dump(dvnode, req);
@ -474,6 +474,14 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
continue;
}
/* Don't expose silly rename entries to userspace. */
if (nlen > 6 &&
dire->u.name[0] == '.' &&
ctx->actor != afs_lookup_filldir &&
ctx->actor != afs_lookup_one_filldir &&
memcmp(dire->u.name, ".__afs", 6) == 0)
continue;
/* found the next entry */
if (!dir_emit(ctx, dire->u.name, nlen,
ntohl(dire->u.vnode),
@ -708,6 +716,8 @@ static void afs_do_lookup_success(struct afs_operation *op)
break;
}
if (vp->scb.status.abort_code)
trace_afs_bulkstat_error(op, &vp->fid, i, vp->scb.status.abort_code);
if (!vp->scb.have_status && !vp->scb.have_error)
continue;
@ -897,12 +907,16 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
afs_begin_vnode_operation(op);
afs_wait_for_operation(op);
}
inode = ERR_PTR(afs_op_error(op));
out_op:
if (!afs_op_error(op)) {
inode = &op->file[1].vnode->netfs.inode;
op->file[1].vnode = NULL;
if (op->file[1].scb.status.abort_code) {
afs_op_accumulate_error(op, -ECONNABORTED,
op->file[1].scb.status.abort_code);
} else {
inode = &op->file[1].vnode->netfs.inode;
op->file[1].vnode = NULL;
}
}
if (op->file[0].scb.have_status)
@ -2022,7 +2036,7 @@ static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags)
{
struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
_enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio));
_enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio->index);
folio_detach_private(folio);
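
The readdir change hides silly-rename artifacts (entries named ".__afs<something>") from ordinary listings while still exposing them to AFS's own lookup filldirs. The filter reduces to a short predicate; a standalone sketch:

#include <stdbool.h>
#include <string.h>

/* Mirror of the check above: entries named ".__afs<id>" are
 * silly-rename leftovers and should not be shown to userspace. */
static bool afs_is_silly_rename(const char *name, size_t nlen)
{
	return nlen > 6 && name[0] == '.' && memcmp(name, ".__afs", 6) == 0;
}

int main(void)
{
	return afs_is_silly_rename(".__afs1234", 10) ? 0 : 1;
}

The name[0] test is redundant with the memcmp() but cheaply rejects most names before the six-byte compare.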


@ -258,16 +258,7 @@ const struct inode_operations afs_dynroot_inode_operations = {
.lookup = afs_dynroot_lookup,
};
/*
* Dirs in the dynamic root don't need revalidation.
*/
static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
{
return 1;
}
const struct dentry_operations afs_dynroot_dentry_operations = {
.d_revalidate = afs_dynroot_d_revalidate,
.d_delete = always_delete_dentry,
.d_release = afs_d_release,
.d_automount = afs_d_automount,


@ -166,7 +166,7 @@ static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
if (!preflist) {
seq_puts(m, "NO PREFS\n");
return 0;
goto out;
}
seq_printf(m, "PROT SUBNET PRIOR (v=%u n=%u/%u/%u)\n",
@ -191,7 +191,8 @@ static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
}
}
rcu_read_lock();
out:
rcu_read_unlock();
return 0;
}


@ -141,16 +141,16 @@ static int compression_decompress_bio(struct list_head *ws,
}
static int compression_decompress(int type, struct list_head *ws,
const u8 *data_in, struct page *dest_page,
unsigned long start_byte, size_t srclen, size_t destlen)
const u8 *data_in, struct page *dest_page,
unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
switch (type) {
case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
start_byte, srclen, destlen);
dest_pgoff, srclen, destlen);
case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
start_byte, srclen, destlen);
dest_pgoff, srclen, destlen);
case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
start_byte, srclen, destlen);
dest_pgoff, srclen, destlen);
case BTRFS_COMPRESS_NONE:
default:
/*
@ -1037,14 +1037,23 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
* start_byte tells us the offset into the compressed data we're interested in
*/
int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
unsigned long start_byte, size_t srclen, size_t destlen)
unsigned long dest_pgoff, size_t srclen, size_t destlen)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
struct list_head *workspace;
const u32 sectorsize = fs_info->sectorsize;
int ret;
/*
* The full destination page range should not exceed the page size.
* And the @destlen should not exceed sectorsize, as this is only called for
* inline file extents, which should not exceed sectorsize.
*/
ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
workspace = get_workspace(type, 0);
ret = compression_decompress(type, workspace, data_in, dest_page,
start_byte, srclen, destlen);
dest_pgoff, srclen, destlen);
put_workspace(type, workspace);
return ret;


@ -148,7 +148,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
unsigned long *total_in, unsigned long *total_out);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress(struct list_head *ws, const u8 *data_in,
struct page *dest_page, unsigned long start_byte, size_t srclen,
struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
struct list_head *zlib_alloc_workspace(unsigned int level);
void zlib_free_workspace(struct list_head *ws);
@ -159,7 +159,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
unsigned long *total_in, unsigned long *total_out);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
struct page *dest_page, unsigned long start_byte, size_t srclen,
struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
struct list_head *lzo_alloc_workspace(unsigned int level);
void lzo_free_workspace(struct list_head *ws);


@ -1260,7 +1260,8 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
u64 bytes_left, end;
u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);
if (WARN_ON(start != aligned_start)) {
/* Adjust the range to be aligned to 512B sectors if necessary. */
if (start != aligned_start) {
len -= aligned_start - start;
len = round_down(len, 1 << SECTOR_SHIFT);
start = aligned_start;
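
The discard path now silently fixes up an unaligned range instead of warning: start is rounded up to the next 512-byte boundary, and len is shrunk and rounded down to whole sectors. The arithmetic in isolation, as a sketch rather than the btrfs helpers:

#include <stdint.h>

#define SECTOR_SHIFT 9
#define SECTOR_SIZE  (UINT64_C(1) << SECTOR_SHIFT)

static void align_discard_range(uint64_t *start, uint64_t *len)
{
	uint64_t aligned = (*start + SECTOR_SIZE - 1) & ~(SECTOR_SIZE - 1);

	if (*start != aligned) {
		*len -= aligned - *start;	/* drop the unaligned head */
		*len &= ~(SECTOR_SIZE - 1);	/* whole sectors only */
		*start = aligned;
	}
}

int main(void)
{
	uint64_t start = 513, len = 4096;

	align_discard_range(&start, &len);
	return (start == 1024 && len == 3584) ? 0 : 1;
}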
@ -4298,6 +4299,42 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
return 0;
}
static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
struct find_free_extent_ctl *ffe_ctl)
{
if (ffe_ctl->for_treelog) {
spin_lock(&fs_info->treelog_bg_lock);
if (fs_info->treelog_bg)
ffe_ctl->hint_byte = fs_info->treelog_bg;
spin_unlock(&fs_info->treelog_bg_lock);
} else if (ffe_ctl->for_data_reloc) {
spin_lock(&fs_info->relocation_bg_lock);
if (fs_info->data_reloc_bg)
ffe_ctl->hint_byte = fs_info->data_reloc_bg;
spin_unlock(&fs_info->relocation_bg_lock);
} else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
struct btrfs_block_group *block_group;
spin_lock(&fs_info->zone_active_bgs_lock);
list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
/*
* No lock is OK here because avail is monotonically
* decreasing, and this is just a hint.
*/
u64 avail = block_group->zone_capacity - block_group->alloc_offset;
if (block_group_bits(block_group, ffe_ctl->flags) &&
avail >= ffe_ctl->num_bytes) {
ffe_ctl->hint_byte = block_group->start;
break;
}
}
spin_unlock(&fs_info->zone_active_bgs_lock);
}
return 0;
}
static int prepare_allocation(struct btrfs_fs_info *fs_info,
struct find_free_extent_ctl *ffe_ctl,
struct btrfs_space_info *space_info,
@ -4308,19 +4345,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
return prepare_allocation_clustered(fs_info, ffe_ctl,
space_info, ins);
case BTRFS_EXTENT_ALLOC_ZONED:
if (ffe_ctl->for_treelog) {
spin_lock(&fs_info->treelog_bg_lock);
if (fs_info->treelog_bg)
ffe_ctl->hint_byte = fs_info->treelog_bg;
spin_unlock(&fs_info->treelog_bg_lock);
}
if (ffe_ctl->for_data_reloc) {
spin_lock(&fs_info->relocation_bg_lock);
if (fs_info->data_reloc_bg)
ffe_ctl->hint_byte = fs_info->data_reloc_bg;
spin_unlock(&fs_info->relocation_bg_lock);
}
return 0;
return prepare_allocation_zoned(fs_info, ffe_ctl);
default:
BUG();
}


@ -4458,6 +4458,8 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
u64 root_flags;
int ret;
down_write(&fs_info->subvol_sem);
/*
* Don't allow to delete a subvolume with send in progress. This is
* inside the inode lock so the error handling that has to drop the bit
@ -4469,25 +4471,25 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
btrfs_warn(fs_info,
"attempt to delete subvolume %llu during send",
dest->root_key.objectid);
return -EPERM;
ret = -EPERM;
goto out_up_write;
}
if (atomic_read(&dest->nr_swapfiles)) {
spin_unlock(&dest->root_item_lock);
btrfs_warn(fs_info,
"attempt to delete subvolume %llu with active swapfile",
root->root_key.objectid);
return -EPERM;
ret = -EPERM;
goto out_up_write;
}
root_flags = btrfs_root_flags(&dest->root_item);
btrfs_set_root_flags(&dest->root_item,
root_flags | BTRFS_ROOT_SUBVOL_DEAD);
spin_unlock(&dest->root_item_lock);
down_write(&fs_info->subvol_sem);
ret = may_destroy_subvol(dest);
if (ret)
goto out_up_write;
goto out_undead;
btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
/*
@ -4497,7 +4499,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
*/
ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
if (ret)
goto out_up_write;
goto out_undead;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
@ -4563,15 +4565,17 @@ out_end_trans:
inode->i_flags |= S_DEAD;
out_release:
btrfs_subvolume_release_metadata(root, &block_rsv);
out_up_write:
up_write(&fs_info->subvol_sem);
out_undead:
if (ret) {
spin_lock(&dest->root_item_lock);
root_flags = btrfs_root_flags(&dest->root_item);
btrfs_set_root_flags(&dest->root_item,
root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
spin_unlock(&dest->root_item_lock);
} else {
}
out_up_write:
up_write(&fs_info->subvol_sem);
if (!ret) {
d_invalidate(dentry);
btrfs_prune_dentries(dest);
ASSERT(dest->send_in_progress == 0);


@ -790,6 +790,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
return -EOPNOTSUPP;
}
if (btrfs_root_refs(&root->root_item) == 0)
return -ENOENT;
if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
return -EINVAL;
@ -2608,6 +2611,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
ret = -EFAULT;
goto out;
}
if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
ret = -EOPNOTSUPP;
goto out;
}
/* compression requires us to start the IO */
if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
range.flags |= BTRFS_DEFRAG_RANGE_START_IO;


@ -425,16 +425,16 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
}
int lzo_decompress(struct list_head *ws, const u8 *data_in,
struct page *dest_page, unsigned long start_byte, size_t srclen,
struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
const u32 sectorsize = fs_info->sectorsize;
size_t in_len;
size_t out_len;
size_t max_segment_len = WORKSPACE_BUF_LENGTH;
int ret = 0;
char *kaddr;
unsigned long bytes;
if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
return -EUCLEAN;
@ -451,7 +451,7 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
}
data_in += LZO_LEN;
out_len = PAGE_SIZE;
out_len = sectorsize;
ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
if (ret != LZO_E_OK) {
pr_warn("BTRFS: decompress failed!\n");
@ -459,29 +459,13 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
goto out;
}
if (out_len < start_byte) {
ASSERT(out_len <= sectorsize);
memcpy_to_page(dest_page, dest_pgoff, workspace->buf, out_len);
/* Early end, considered an error. */
if (unlikely(out_len < destlen)) {
ret = -EIO;
goto out;
memzero_page(dest_page, dest_pgoff + out_len, destlen - out_len);
}
/*
* the caller is already checking against PAGE_SIZE, but lets
* move this check closer to the memcpy/memset
*/
destlen = min_t(unsigned long, destlen, PAGE_SIZE);
bytes = min_t(unsigned long, destlen, out_len - start_byte);
kaddr = kmap_local_page(dest_page);
memcpy(kaddr, workspace->buf + start_byte, bytes);
/*
* btrfs_getblock is doing a zero on the tail of the page too,
* but this will cover anything missing from the decompressed
* data.
*/
if (bytes < destlen)
memset(kaddr+bytes, 0, destlen-bytes);
kunmap_local(kaddr);
out:
return ret;
}


@ -889,8 +889,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
out_unlock:
spin_unlock(&fs_info->ref_verify_lock);
out:
if (ret)
if (ret) {
btrfs_free_ref_cache(fs_info);
btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
}
return ret;
}
@ -1021,8 +1023,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
}
}
if (ret) {
btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
btrfs_free_ref_cache(fs_info);
btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
}
btrfs_free_path(path);
return ret;


@ -1098,12 +1098,22 @@ out:
static void scrub_read_endio(struct btrfs_bio *bbio)
{
struct scrub_stripe *stripe = bbio->private;
struct bio_vec *bvec;
int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
int num_sectors;
u32 bio_size = 0;
int i;
ASSERT(sector_nr < stripe->nr_sectors);
bio_for_each_bvec_all(bvec, &bbio->bio, i)
bio_size += bvec->bv_len;
num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
if (bbio->bio.bi_status) {
bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
} else {
bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
}
bio_put(&bbio->bio);
if (atomic_dec_and_test(&stripe->pending_io)) {
@ -1636,6 +1646,9 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
{
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
struct btrfs_bio *bbio = NULL;
unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
stripe->bg->length - stripe->logical) >>
fs_info->sectorsize_bits;
u64 stripe_len = BTRFS_STRIPE_LEN;
int mirror = stripe->mirror_num;
int i;
@ -1646,6 +1659,10 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
struct page *page = scrub_stripe_get_page(stripe, i);
unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
/* We're beyond the chunk boundary, no need to read anymore. */
if (i >= nr_sectors)
break;
/* The current sector cannot be merged, submit the bio. */
if (bbio &&
((i > 0 &&
@ -1701,6 +1718,9 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_bio *bbio;
unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
stripe->bg->length - stripe->logical) >>
fs_info->sectorsize_bits;
int mirror = stripe->mirror_num;
ASSERT(stripe->bg);
@ -1715,14 +1735,16 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
scrub_read_endio, stripe);
/* Read the whole stripe. */
bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
/* Read the whole range inside the chunk boundary. */
for (unsigned int cur = 0; cur < nr_sectors; cur++) {
struct page *page = scrub_stripe_get_page(stripe, cur);
unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
int ret;
ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
/* We should have allocated enough bio vectors. */
ASSERT(ret == PAGE_SIZE);
ASSERT(ret == fs_info->sectorsize);
}
atomic_inc(&stripe->pending_io);
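
scrub_read_endio() used to flag the entire stripe on any I/O error; it now derives the affected window from the bio itself by summing the bvec lengths and shifting by sectorsize_bits. The byte-to-sector conversion, sketched with made-up numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t sectorsize_bits = 12;	/* 4 KiB sectors */
	uint32_t bio_size = 3 * 4096;	/* summed bvec lengths */
	int first_sector = 5;		/* from the first bvec */

	int num_sectors = bio_size >> sectorsize_bits;

	/* an error now marks sectors [5, 8), not the whole stripe */
	assert(num_sectors == 3);
	assert(first_sector + num_sectors == 8);
	return 0;
}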


@ -8205,8 +8205,8 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
goto out;
}
sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
arg->clone_sources_count + 1,
sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
sizeof(*sctx->clone_roots),
GFP_KERNEL);
if (!sctx->clone_roots) {
ret = -ENOMEM;
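
kvcalloc() follows calloc() semantics: element count first, element size second. The send fix swaps arguments that were passed in reverse; the product, and thus the allocation, is the same, but the call now matches the documented order. The convention in plain calloc():

#include <stdlib.h>

struct clone_root { long ino; };

int main(void)
{
	unsigned long count = 8;

	/* count first, size second: the allocator can then detect
	 * count * size overflow on our behalf */
	struct clone_root *roots = calloc(count + 1, sizeof(*roots));

	if (!roots)
		return 1;
	free(roots);
	return 0;
}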


@ -475,7 +475,8 @@ void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
spin_lock_irqsave(&subpage->lock, flags);
bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
folio_start_writeback(folio);
if (!folio_test_writeback(folio))
folio_start_writeback(folio);
spin_unlock_irqrestore(&subpage->lock, flags);
}


@ -1457,6 +1457,14 @@ static int btrfs_reconfigure(struct fs_context *fc)
btrfs_info_to_ctx(fs_info, &old_ctx);
/*
* This is our "bind mount" trick, we don't want to allow the user to do
* anything other than mount a different ro/rw and a different subvol,
* all of the mount options should be maintained.
*/
if (mount_reconfigure)
ctx->mount_opt = old_ctx.mount_opt;
sync_filesystem(sb);
set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);


@ -1436,7 +1436,7 @@ static int check_extent_item(struct extent_buffer *leaf,
if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
ptr, inline_type, end);
ptr, btrfs_extent_inline_ref_size(inline_type), end);
return -EUCLEAN;
}


@ -3087,7 +3087,6 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
map = btrfs_find_chunk_map(fs_info, logical, length);
if (unlikely(!map)) {
read_unlock(&fs_info->mapping_tree_lock);
btrfs_crit(fs_info,
"unable to find chunk map for logical %llu length %llu",
logical, length);
@ -3095,7 +3094,6 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
}
if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
read_unlock(&fs_info->mapping_tree_lock);
btrfs_crit(fs_info,
"found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
logical, logical + length, map->start,


@ -354,18 +354,13 @@ done:
}
int zlib_decompress(struct list_head *ws, const u8 *data_in,
struct page *dest_page, unsigned long start_byte, size_t srclen,
struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0;
int wbits = MAX_WBITS;
unsigned long bytes_left;
unsigned long total_out = 0;
unsigned long pg_offset = 0;
destlen = min_t(unsigned long, destlen, PAGE_SIZE);
bytes_left = destlen;
unsigned long to_copy;
workspace->strm.next_in = data_in;
workspace->strm.avail_in = srclen;
@ -390,60 +385,30 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in,
return -EIO;
}
while (bytes_left > 0) {
unsigned long buf_start;
unsigned long buf_offset;
unsigned long bytes;
/*
* Everything (in/out buf) should be at most one sector, so there should
* be no need to switch any input/output buffer.
*/
ret = zlib_inflate(&workspace->strm, Z_FINISH);
to_copy = min(workspace->strm.total_out, destlen);
if (ret != Z_STREAM_END)
goto out;
ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
if (ret != Z_OK && ret != Z_STREAM_END)
break;
memcpy_to_page(dest_page, dest_pgoff, workspace->buf, to_copy);
buf_start = total_out;
total_out = workspace->strm.total_out;
if (total_out == buf_start) {
ret = -EIO;
break;
}
if (total_out <= start_byte)
goto next;
if (total_out > start_byte && buf_start < start_byte)
buf_offset = start_byte - buf_start;
else
buf_offset = 0;
bytes = min(PAGE_SIZE - pg_offset,
PAGE_SIZE - (buf_offset % PAGE_SIZE));
bytes = min(bytes, bytes_left);
memcpy_to_page(dest_page, pg_offset,
workspace->buf + buf_offset, bytes);
pg_offset += bytes;
bytes_left -= bytes;
next:
workspace->strm.next_out = workspace->buf;
workspace->strm.avail_out = workspace->buf_size;
}
if (ret != Z_STREAM_END && bytes_left != 0)
out:
if (unlikely(to_copy != destlen)) {
pr_warn_ratelimited("BTRFS: inflate failed, decompressed=%lu expected=%zu\n",
to_copy, destlen);
ret = -EIO;
else
} else {
ret = 0;
}
zlib_inflateEnd(&workspace->strm);
/*
* this should only happen if zlib returned fewer bytes than we
* expected. btrfs_get_block is responsible for zeroing from the
* end of the inline extent (destlen) to the end of the page
*/
if (pg_offset < destlen) {
memzero_page(dest_page, pg_offset, destlen - pg_offset);
}
if (unlikely(to_copy < destlen))
memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
return ret;
}
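
The rewritten zlib_decompress() assumes an inline extent fits in one sector: a single Z_FINISH inflate into the workspace buffer, one copy out, and explicit zeroing of any shortfall, which is still reported as an error. A userspace approximation using zlib's one-shot uncompress() (link with -lz); the 4 KiB sector size is an assumption:

#include <string.h>
#include <zlib.h>

/* Sketch: decompress an inline extent of at most one 4 KiB sector.
 * Short output is an error, but the tail is still zeroed so the
 * destination never exposes stale data. */
static int inline_decompress(unsigned char *dst, size_t destlen,
			     const unsigned char *src, size_t srclen)
{
	unsigned char buf[4096];
	uLongf out_len = sizeof(buf);
	int ret = 0;

	if (uncompress(buf, &out_len, src, (uLong)srclen) != Z_OK)
		return -1;

	if (out_len > destlen)
		out_len = destlen;
	memcpy(dst, buf, out_len);
	if (out_len < destlen) {
		memset(dst + out_len, 0, destlen - out_len);
		ret = -1;	/* early end, as in the kernel path */
	}
	return ret;
}

int main(void)
{
	unsigned char plain[100] = "hello", comp[200], out[512];
	uLongf clen = sizeof(comp);

	if (compress(comp, &clen, plain, sizeof(plain)) != Z_OK)
		return 1;
	/* destlen larger than the payload: tail zeroed, ret = -1 */
	return inline_decompress(out, sizeof(out), comp, clen) == -1 ? 0 : 1;
}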


@ -2055,6 +2055,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
map = block_group->physical_map;
spin_lock(&fs_info->zone_active_bgs_lock);
spin_lock(&block_group->lock);
if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
ret = true;
@ -2067,7 +2068,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
goto out_unlock;
}
spin_lock(&fs_info->zone_active_bgs_lock);
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_zoned_device_info *zinfo;
int reserved = 0;
@ -2087,20 +2087,17 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
*/
if (atomic_read(&zinfo->active_zones_left) <= reserved) {
ret = false;
spin_unlock(&fs_info->zone_active_bgs_lock);
goto out_unlock;
}
if (!btrfs_dev_set_active_zone(device, physical)) {
/* Cannot activate the zone */
ret = false;
spin_unlock(&fs_info->zone_active_bgs_lock);
goto out_unlock;
}
if (!is_data)
zinfo->reserved_active_zones--;
}
spin_unlock(&fs_info->zone_active_bgs_lock);
/* Successfully activated all the zones */
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
@ -2108,8 +2105,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
/* For the active block group list */
btrfs_get_block_group(block_group);
spin_lock(&fs_info->zone_active_bgs_lock);
list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
spin_unlock(&fs_info->zone_active_bgs_lock);
@ -2117,6 +2112,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
out_unlock:
spin_unlock(&block_group->lock);
spin_unlock(&fs_info->zone_active_bgs_lock);
return ret;
}


@ -539,6 +539,9 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
struct fscache_volume *volume = object->volume->vcookie;
size_t volume_key_size, cookie_key_size, data_len;
if (!object->ondemand)
return 0;
/*
* CacheFiles will firstly check the cache file under the root cache
* directory. If the coherency check failed, it will fallback to


@ -128,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
struct filename *tmp = getname(library);
int error = PTR_ERR(tmp);
static const struct open_flags uselib_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
.open_flag = O_LARGEFILE | O_RDONLY,
.acc_mode = MAY_READ | MAY_EXEC,
.intent = LOOKUP_OPEN,
.lookup_flags = LOOKUP_FOLLOW,
@ -904,6 +904,10 @@ EXPORT_SYMBOL(transfer_args_to_stack);
#endif /* CONFIG_MMU */
/*
* On success, caller must call do_close_execat() on the returned
* struct file to close it.
*/
static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
struct file *file;
@ -948,6 +952,17 @@ exit:
return ERR_PTR(err);
}
/**
* open_exec - Open a path name for execution
*
* @name: path name to open with the intent of executing it.
*
* Returns ERR_PTR on failure or allocated struct file on success.
*
* As this is a wrapper for the internal do_open_execat(), callers
* must call allow_write_access() before fput() on release. Also see
* do_close_execat().
*/
struct file *open_exec(const char *name)
{
struct filename *filename = getname_kernel(name);
@ -1409,6 +1424,9 @@ int begin_new_exec(struct linux_binprm * bprm)
out_unlock:
up_write(&me->signal->exec_update_lock);
if (!bprm->cred)
mutex_unlock(&me->signal->cred_guard_mutex);
out:
return retval;
}
@ -1484,6 +1502,15 @@ static int prepare_bprm_creds(struct linux_binprm *bprm)
return -ENOMEM;
}
/* Matches do_open_execat() */
static void do_close_execat(struct file *file)
{
if (!file)
return;
allow_write_access(file);
fput(file);
}
static void free_bprm(struct linux_binprm *bprm)
{
if (bprm->mm) {
@ -1495,10 +1522,7 @@ static void free_bprm(struct linux_binprm *bprm)
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
if (bprm->file) {
allow_write_access(bprm->file);
fput(bprm->file);
}
do_close_execat(bprm->file);
if (bprm->executable)
fput(bprm->executable);
/* If a binfmt changed the interp, free it. */
@ -1520,8 +1544,7 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int fl
bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
if (!bprm) {
allow_write_access(file);
fput(file);
do_close_execat(file);
return ERR_PTR(-ENOMEM);
}
@ -1610,6 +1633,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
}
rcu_read_unlock();
/* "users" and "in_exec" locked for copy_fs() */
if (p->fs->users > n_fs)
bprm->unsafe |= LSM_UNSAFE_SHARE;
else
@ -1826,9 +1850,6 @@ static int exec_binprm(struct linux_binprm *bprm)
return 0;
}
/*
* sys_execve() executes a new program.
*/
static int bprm_execve(struct linux_binprm *bprm)
{
int retval;
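
do_open_execat() now has an explicitly paired do_close_execat(), so the allow_write_access() + fput() teardown lives in one helper that tolerates NULL instead of being repeated at each error site. The pairing idiom in miniature, with a hypothetical userspace resource:

#include <stdio.h>

/* Hypothetical paired helpers: whatever acquire() sets up, release()
 * undoes, and release(NULL) is a no-op so error paths stay simple. */
static FILE *acquire(const char *path)
{
	return fopen(path, "r");
}

static void release(FILE *f)
{
	if (!f)
		return;
	fclose(f);
}

int main(void)
{
	FILE *f = acquire("/etc/hostname");

	release(f);	/* safe even if acquire() failed */
	return 0;
}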


@ -101,7 +101,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
}
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
if (folio_index(folio) == rreq->no_unlock_folio &&
if (folio->index == rreq->no_unlock_folio &&
test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
_debug("no unlock");
else
@ -246,13 +246,13 @@ EXPORT_SYMBOL(netfs_readahead);
*/
int netfs_read_folio(struct file *file, struct folio *folio)
{
struct address_space *mapping = folio_file_mapping(folio);
struct address_space *mapping = folio->mapping;
struct netfs_io_request *rreq;
struct netfs_inode *ctx = netfs_inode(mapping->host);
struct folio *sink = NULL;
int ret;
_enter("%lx", folio_index(folio));
_enter("%lx", folio->index);
rreq = netfs_alloc_request(mapping, file,
folio_file_pos(folio), folio_size(folio),
@ -460,7 +460,7 @@ retry:
ret = PTR_ERR(rreq);
goto error;
}
rreq->no_unlock_folio = folio_index(folio);
rreq->no_unlock_folio = folio->index;
__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
ret = netfs_begin_cache_read(rreq, ctx);
@ -518,7 +518,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
size_t offset, size_t len)
{
struct netfs_io_request *rreq;
struct address_space *mapping = folio_file_mapping(folio);
struct address_space *mapping = folio->mapping;
struct netfs_inode *ctx = netfs_inode(mapping->host);
unsigned long long start = folio_pos(folio);
size_t flen = folio_size(folio);
@ -535,7 +535,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
goto error;
}
rreq->no_unlock_folio = folio_index(folio);
rreq->no_unlock_folio = folio->index;
__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
ret = netfs_begin_cache_read(rreq, ctx);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)


@ -221,10 +221,11 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
break;
ret = -ENOMEM;
folio = netfs_grab_folio_for_write(mapping, pos, part);
if (!folio)
if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
break;
}
flen = folio_size(folio);
offset = pos & (flen - 1);
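
netfs_grab_folio_for_write() reports failure through ERR_PTR(), so the old NULL test let errors sail past; the fix tests IS_ERR() and propagates PTR_ERR(). A standalone sketch of the convention (simplified re-definitions, not the kernel headers):

#include <errno.h>
#include <stdio.h>

/* An error code is smuggled inside the pointer value, so a bare NULL
 * test (the old netfs check) lets errors straight through. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *grab_folio(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
	void *folio = grab_folio(1);

	if (IS_ERR(folio)) {	/* "if (!folio)" would be false here */
		printf("grab failed: %ld\n", PTR_ERR(folio));
		return 1;
	}
	return 0;
}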
@ -343,7 +344,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
break;
default:
WARN(true, "Unexpected modify type %u ix=%lx\n",
howto, folio_index(folio));
howto, folio->index);
ret = -EIO;
goto error_folio_unlock;
}
@ -648,7 +649,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
xas_for_each(&xas, folio, last) {
WARN(!folio_test_writeback(folio),
"bad %zx @%llx page %lx %lx\n",
wreq->len, wreq->start, folio_index(folio), last);
wreq->len, wreq->start, folio->index, last);
if ((finfo = netfs_folio_info(folio))) {
/* Streaming writes cannot be redirtied whilst under
@ -795,7 +796,7 @@ static void netfs_extend_writeback(struct address_space *mapping,
continue;
if (xa_is_value(folio))
break;
if (folio_index(folio) != index) {
if (folio->index != index) {
xas_reset(xas);
break;
}
@ -901,7 +902,7 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
long count = wbc->nr_to_write;
int ret;
_enter(",%lx,%llx-%llx,%u", folio_index(folio), start, end, caching);
_enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
NETFS_WRITEBACK);
@ -1047,7 +1048,7 @@ search_again:
start = folio_pos(folio); /* May regress with THPs */
_debug("wback %lx", folio_index(folio));
_debug("wback %lx", folio->index);
/* At this point we hold neither the i_pages lock nor the page lock:
* the page may be truncated or invalidated (changing page->mapping to
