2023-12-15 04:15:29 -07:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
|
|
|
|
#ifndef _LINUX_NVMEM_INTERNALS_H
|
|
|
|
#define _LINUX_NVMEM_INTERNALS_H
|
|
|
|
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/nvmem-consumer.h>
|
|
|
|
#include <linux/nvmem-provider.h>
|
|
|
|
|
|
|
|
struct nvmem_device {
|
|
|
|
struct module *owner;
|
|
|
|
struct device dev;
|
|
|
|
struct list_head node;
|
|
|
|
int stride;
|
|
|
|
int word_size;
|
|
|
|
int id;
|
|
|
|
struct kref refcnt;
|
|
|
|
size_t size;
|
|
|
|
bool read_only;
|
|
|
|
bool root_only;
|
|
|
|
int flags;
|
|
|
|
enum nvmem_type type;
|
|
|
|
struct bin_attribute eeprom;
|
|
|
|
struct device *base_dev;
|
|
|
|
struct list_head cells;
|
2023-12-15 04:15:31 -07:00
|
|
|
void (*fixup_dt_cell_info)(struct nvmem_device *nvmem,
|
|
|
|
struct nvmem_cell_info *cell);
|
2023-12-15 04:15:29 -07:00
|
|
|
const struct nvmem_keepout *keepout;
|
|
|
|
unsigned int nkeepout;
|
|
|
|
nvmem_reg_read_t reg_read;
|
|
|
|
nvmem_reg_write_t reg_write;
|
|
|
|
struct gpio_desc *wp_gpio;
|
|
|
|
struct nvmem_layout *layout;
|
|
|
|
void *priv;
|
nvmem: core: Expose cells through sysfs
The binary content of nvmem devices is available to the user so in the
easiest cases, finding the content of a cell is rather easy as it is
just a matter of looking at a known and fixed offset. However, nvmem
layouts have been recently introduced to cope with more advanced
situations, where the offset and size of the cells is not known in
advance or is dynamic. When using layouts, more advanced parsers are
used by the kernel in order to give direct access to the content of each
cell, regardless of its position/size in the underlying
device. Unfortunately, these information are not accessible by users,
unless by fully re-implementing the parser logic in userland.
Let's expose the cells and their content through sysfs to avoid these
situations. Of course the relevant NVMEM sysfs Kconfig option must be
enabled for this support to be available.
Not all nvmem devices expose cells. Indeed, the .bin_attrs attribute
group member will be filled at runtime only when relevant and will
remain empty otherwise. In this case, as the cells attribute group will
be empty, it will not lead to any additional folder/file creation.
Exposed cells are read-only. There is, in practice, everything in the
core to support a write path, but as I don't see any need for that, I
prefer to keep the interface simple (and probably safer). The interface
is documented as being in the "testing" state which means we can later
add a write attribute if though relevant.
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Tested-by: Rafał Miłecki <rafal@milecki.pl>
Tested-by: Chen-Yu Tsai <wenst@chromium.org>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Link: https://lore.kernel.org/r/20231215111536.316972-9-srinivas.kandagatla@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2023-12-15 04:15:34 -07:00
|
|
|
bool sysfs_cells_populated;
|
2023-12-15 04:15:29 -07:00
|
|
|
};
|
|
|
|
|
nvmem: core: Rework layouts to become regular devices
Current layout support was initially written without modules support in
mind. When the requirement for module support rose, the existing base
was improved to adopt modularization support, but kind of a design flaw
was introduced. With the existing implementation, when a storage device
registers into NVMEM, the core tries to hook a layout (if any) and
populates its cells immediately. This means, if the hardware description
expects a layout to be hooked up, but no driver was provided for that,
the storage medium will fail to probe and try later from
scratch. Even if we consider that the hardware description shall be
correct, we could still probe the storage device (especially if it
contains the rootfs).
One way to overcome this situation is to consider the layouts as
devices, and leverage the native notifier mechanism. When a new NVMEM
device is registered, we can populate its nvmem-layout child, if any,
and wait for the matching to be done in order to get the cells (the
waiting can be easily done with the NVMEM notifiers). If the layout
driver is compiled as a module, it should automatically be loaded. This
way, there is no strong order to enforce, any NVMEM device creation
or NVMEM layout driver insertion will be observed as a new event which
may lead to the creation of additional cells, without disturbing the
probes with costly (and sometimes endless) deferrals.
In order to achieve that goal we create a new bus for the nvmem-layouts
with minimal logic to match nvmem-layout devices with nvmem-layout
drivers. All this infrastructure code is created in the layouts.c file.
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
Tested-by: Rafał Miłecki <rafal@milecki.pl>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Link: https://lore.kernel.org/r/20231215111536.316972-7-srinivas.kandagatla@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2023-12-15 04:15:32 -07:00
|
|
|
#if IS_ENABLED(CONFIG_OF)
|
|
|
|
int nvmem_layout_bus_register(void);
|
|
|
|
void nvmem_layout_bus_unregister(void);
|
|
|
|
int nvmem_populate_layout(struct nvmem_device *nvmem);
|
|
|
|
void nvmem_destroy_layout(struct nvmem_device *nvmem);
|
|
|
|
#else /* CONFIG_OF */
|
|
|
|
static inline int nvmem_layout_bus_register(void)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void nvmem_layout_bus_unregister(void) {}
|
|
|
|
|
|
|
|
static inline int nvmem_populate_layout(struct nvmem_device *nvmem)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void nvmem_destroy_layout(struct nvmem_device *nvmem) { }
|
|
|
|
#endif /* CONFIG_OF */
|
|
|
|
|
2023-12-15 04:15:29 -07:00
|
|
|
#endif /* ifndef _LINUX_NVMEM_INTERNALS_H */
|