Merge android-4.14-p.62 (366527f) into msm-4.14

* remotes/origin/tmp-366527f:
  Linux 4.14.62
  jfs: Fix inconsistency between memory allocation and ea_buf->max_size
  xfs: don't call xfs_da_shrink_inode with NULL bp
  xfs: validate cached inodes are free when allocated
  xfs: catch inode allocation state mismatch corruption
  intel_idle: Graceful probe failure when MWAIT is disabled
  nvmet-fc: fix target sgl list on large transfers
  nvme-pci: Fix queue double allocations
  nvme-pci: allocate device queues storage space at probe
  Btrfs: fix file data corruption after cloning a range and fsync
  i2c: imx: Fix reinit_completion() use
  ring_buffer: tracing: Inherit the tracing setting to next ring buffer
  ACPI / PCI: Bail early in acpi_pci_add_bus() if there is no ACPI handle
  ext4: fix false negatives *and* false positives in ext4_check_descriptors()
  netlink: Don't shift on 64 for ngroups
  nohz: Fix missing tick reprogram when interrupting an inline softirq
  nohz: Fix local_timer_softirq_pending()
  genirq: Make force irq threading setup more robust
  scsi: qla2xxx: Return error when TMF returns
  scsi: qla2xxx: Fix ISP recovery on unload
  scsi: qla2xxx: Fix NPIV deletion by calling wait_for_sess_deletion
  scsi: qla2xxx: Fix unintialized List head crash

Change-Id: I22b4b42fe0e8dbe3e92da168c5dd1bff1d4816e9
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
commit c1085cd6f6
---
 Makefile                          |  2
 drivers/i2c/busses/i2c-imx.c      |  3
 drivers/idle/intel_idle.c         |  7
 drivers/nvme/host/pci.c           | 64
 drivers/nvme/target/fc.c          | 44
 drivers/pci/pci-acpi.c            |  2
 drivers/scsi/qla2xxx/qla_attr.c   |  1
 drivers/scsi/qla2xxx/qla_gbl.h    |  1
 drivers/scsi/qla2xxx/qla_gs.c     |  4
 drivers/scsi/qla2xxx/qla_init.c   |  7
 drivers/scsi/qla2xxx/qla_inline.h |  2
 drivers/scsi/qla2xxx/qla_mid.c    |  5
 drivers/scsi/qla2xxx/qla_os.c     |  7
 fs/btrfs/extent_io.c              |  3
 fs/ext4/super.c                   |  4
 fs/jfs/xattr.c                    | 10
 fs/xfs/libxfs/xfs_attr_leaf.c     |  5
 fs/xfs/xfs_icache.c               | 58
 include/linux/ring_buffer.h       |  1
 kernel/irq/manage.c               |  9
 kernel/softirq.c                  |  2
 kernel/time/tick-sched.c          |  2
 kernel/trace/ring_buffer.c        | 16
 kernel/trace/trace.c              |  6
 net/netlink/af_netlink.c          |  4
 25 files changed, 269 lines changed

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 61
+SUBLEVEL = 62
 EXTRAVERSION =
 NAME = Petit Gorille

@@ -376,6 +376,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
 		goto err_desc;
 	}
 
+	reinit_completion(&dma->cmd_complete);
 	txdesc->callback = i2c_imx_dma_callback;
 	txdesc->callback_param = i2c_imx;
 	if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -619,7 +620,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
 	 * The first byte must be transmitted by the CPU.
 	 */
 	imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -678,7 +678,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
 	if (result)
 		return result;
 
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));

@@ -1061,7 +1061,7 @@ static const struct idle_cpu idle_cpu_dnv = {
 };
 
 #define ICPU(model, cpu) \
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
 
 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
@@ -1125,6 +1125,11 @@ static int __init intel_idle_probe(void)
 		return -ENODEV;
 	}
 
+	if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+		pr_debug("Please enable MWAIT in BIOS SETUP\n");
+		return -ENODEV;
+	}
+
 	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
 		return -ENODEV;

@@ -77,7 +77,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
-	struct nvme_queue **queues;
+	struct nvme_queue *queues;
 	struct blk_mq_tag_set tagset;
 	struct blk_mq_tag_set admin_tagset;
 	u32 __iomem *dbs;
@@ -348,7 +348,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 				unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 
 	WARN_ON(hctx_idx != 0);
 	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -370,7 +370,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 			  unsigned int hctx_idx)
 {
 	struct nvme_dev *dev = data;
-	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
 
 	if (!nvmeq->tags)
 		nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -386,7 +386,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
 	struct nvme_dev *dev = set->driver_data;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
-	struct nvme_queue *nvmeq = dev->queues[queue_idx];
+	struct nvme_queue *nvmeq = &dev->queues[queue_idx];
 
 	BUG_ON(!nvmeq);
 	iod->nvmeq = nvmeq;
@@ -900,7 +900,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
 {
 	struct nvme_dev *dev = to_nvme_dev(ctrl);
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 	struct nvme_command c;
 
 	memset(&c, 0, sizeof(c));
@@ -1146,7 +1146,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
 	if (nvmeq->sq_cmds)
 		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
 					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
-	kfree(nvmeq);
 }
 
 static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1154,10 +1153,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 	int i;
 
 	for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
-		struct nvme_queue *nvmeq = dev->queues[i];
 		dev->ctrl.queue_count--;
-		dev->queues[i] = NULL;
-		nvme_free_queue(nvmeq);
+		nvme_free_queue(&dev->queues[i]);
 	}
 }
@@ -1189,10 +1186,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
-	struct nvme_queue *nvmeq = dev->queues[0];
+	struct nvme_queue *nvmeq = &dev->queues[0];
 
-	if (!nvmeq)
-		return;
 	if (nvme_suspend_queue(nvmeq))
 		return;
@@ -1246,13 +1241,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 	return 0;
 }
 
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
-					   int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
+			    int depth, int node)
 {
-	struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
-						node);
-	if (!nvmeq)
-		return NULL;
+	struct nvme_queue *nvmeq = &dev->queues[qid];
+
+	if (dev->ctrl.queue_count > qid)
+		return 0;
 
 	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
 					  &nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1271,17 +1266,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 	nvmeq->q_depth = depth;
 	nvmeq->qid = qid;
 	nvmeq->cq_vector = -1;
-	dev->queues[qid] = nvmeq;
 	dev->ctrl.queue_count++;
 
-	return nvmeq;
+	return 0;
 
  free_cqdma:
 	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
 			  nvmeq->cq_dma_addr);
  free_nvmeq:
-	kfree(nvmeq);
-	return NULL;
+	return -ENOMEM;
 }
 
 static int queue_request_irq(struct nvme_queue *nvmeq)
@@ -1468,14 +1461,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
 	if (result < 0)
 		return result;
 
-	nvmeq = dev->queues[0];
-	if (!nvmeq) {
-		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
-					 dev_to_node(dev->dev));
-		if (!nvmeq)
-			return -ENOMEM;
-	}
+	result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+				  dev_to_node(dev->dev));
+	if (result)
+		return result;
 
+	nvmeq = &dev->queues[0];
 	aqa = nvmeq->q_depth - 1;
 	aqa |= aqa << 16;
@@ -1505,7 +1496,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 	for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
 		/* vector == qid - 1, match nvme_create_queue */
-		if (!nvme_alloc_queue(dev, i, dev->q_depth,
+		if (nvme_alloc_queue(dev, i, dev->q_depth,
 			pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
 			ret = -ENOMEM;
 			break;
@@ -1514,7 +1505,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 
 	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
 	for (i = dev->online_queues; i <= max; i++) {
-		ret = nvme_create_queue(dev->queues[i], i);
+		ret = nvme_create_queue(&dev->queues[i], i);
 		if (ret)
 			break;
 	}
@@ -1770,7 +1761,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	struct nvme_queue *adminq = dev->queues[0];
+	struct nvme_queue *adminq = &dev->queues[0];
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int result, nr_io_queues;
 	unsigned long size;
@@ -1896,7 +1887,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
  retry:
 	timeout = ADMIN_TIMEOUT;
 	for (; i > 0; i--, sent++)
-		if (nvme_delete_queue(dev->queues[i], opcode))
+		if (nvme_delete_queue(&dev->queues[i], opcode))
 			break;
 
 	while (sent--) {
@@ -2081,7 +2072,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 
 	queues = dev->online_queues - 1;
 	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
-		nvme_suspend_queue(dev->queues[i]);
+		nvme_suspend_queue(&dev->queues[i]);
 
 	if (dead) {
 		/* A device might become IO incapable very soon during
@@ -2089,7 +2080,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		 * queue_count can be 0 here.
 		 */
 		if (dev->ctrl.queue_count)
-			nvme_suspend_queue(dev->queues[0]);
+			nvme_suspend_queue(&dev->queues[0]);
 	} else {
 		nvme_disable_io_queues(dev, queues);
 		nvme_disable_admin_queue(dev, shutdown);
@@ -2345,7 +2336,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
 	if (!dev)
 		return -ENOMEM;
-	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
+	dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(struct nvme_queue),
 			GFP_KERNEL, node);
 	if (!dev->queues)
 		goto free;
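
The effect of the two nvme-pci patches above is easiest to see outside the driver: the per-queue kzalloc of an array of pointers is replaced by one flat array sized once at probe time, so "allocating" an already-set-up queue slot becomes a harmless no-op instead of a leak. A minimal user-space sketch of that shape follows; the names are illustrative, not the driver's:

/*
 * Sketch only: one contiguous queue array sized up front, with an
 * idempotent per-slot setup, mirroring the structure of the fix above.
 */
#include <stdio.h>
#include <stdlib.h>

struct queue { int qid; int depth; };

struct dev {
	struct queue *queues;   /* one flat array, sized at probe time */
	int queue_count;
};

static int alloc_queue(struct dev *d, int qid, int depth)
{
	struct queue *q = &d->queues[qid];

	if (d->queue_count > qid)   /* slot already set up: nothing to redo */
		return 0;

	q->qid = qid;
	q->depth = depth;
	d->queue_count++;
	return 0;
}

int main(void)
{
	struct dev d = { .queue_count = 0 };

	d.queues = calloc(4, sizeof(struct queue));
	if (!d.queues)
		return 1;

	alloc_queue(&d, 0, 32);
	alloc_queue(&d, 0, 32);   /* the double "allocation" is now benign */
	printf("queues set up: %d\n", d.queue_count);
	free(d.queues);
	return 0;
}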

@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
 	struct work_struct	work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
 	NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
 	struct nvme_fc_cmd_iu	cmdiubuf;
 	struct nvme_fc_ersp_iu	rspiubuf;
 	dma_addr_t		rspdma;
+	struct scatterlist	*next_sg;
 	struct scatterlist	*data_sg;
 	int			data_sg_cnt;
 	u32			total_length;
@@ -1000,8 +1001,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->assoc_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
-	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-					template->max_sgl_segments);
+	newrec->max_sg_cnt = template->max_sgl_segments;
 
 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
 	if (ret) {
@@ -1717,6 +1717,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 				((fod->io_dir == NVMET_FCP_WRITE) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
 				/* note: write from initiator perspective */
+	fod->next_sg = fod->data_sg;
 
 	return 0;
@@ -1874,24 +1875,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 				struct nvmet_fc_fcp_iod *fod, u8 op)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg = fod->next_sg;
 	unsigned long flags;
-	u32 tlen;
+	u32 remaininglen = fod->total_length - fod->offset;
+	u32 tlen = 0;
 	int ret;
 
 	fcpreq->op = op;
 	fcpreq->offset = fod->offset;
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->total_length - fod->offset));
+	/*
+	 * for next sequence:
+	 *   break at a sg element boundary
+	 *   attempt to keep sequence length capped at
+	 *     NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+	 *     be longer if a single sg element is larger
+	 *     than that amount. This is done to avoid creating
+	 *     a new sg list to use for the tgtport api.
+	 */
+	fcpreq->sg = sg;
+	fcpreq->sg_cnt = 0;
+	while (tlen < remaininglen &&
+	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+		fcpreq->sg_cnt++;
+		tlen += sg_dma_len(sg);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+		fcpreq->sg_cnt++;
+		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen)
+		fod->next_sg = sg;
+	else
+		fod->next_sg = NULL;
+
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
 	fcpreq->rsplen = 0;
 
-	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
 	/*
 	 * If the last READDATA request: check if LLDD supports
 	 * combined xfr with response.

@@ -624,7 +624,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
 	union acpi_object *obj;
 	struct pci_host_bridge *bridge;
 
-	if (acpi_pci_disabled || !bus->bridge)
+	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
 		return;
 
 	acpi_pci_slot_enumerate(bus);

@@ -2142,6 +2142,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 		msleep(1000);
 
 	qla24xx_disable_vp(vha);
+	qla2x00_wait_for_sess_deletion(vha);
 
 	vha->flags.delete_progress = 1;

@@ -200,6 +200,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 	uint16_t *);
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_async_abort_cmd(srb_t *);
+void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
 
 /*
  * Global Functions in qla_mid.c source file.

@@ -3368,6 +3368,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
 	return rval;
 
 done_free_sp:
+	spin_lock_irqsave(&vha->hw->vport_slock, flags);
+	list_del(&sp->elem);
+	spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+
 	if (sp->u.iocb_cmd.u.ctarg.req) {
 		dma_free_coherent(&vha->hw->pdev->dev,
 			sizeof(struct ct_sns_pkt),

@@ -1326,11 +1326,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
 	wait_for_completion(&tm_iocb->u.tmf.comp);
 
-	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+	rval = tm_iocb->u.tmf.data;
 
-	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
-		ql_dbg(ql_dbg_taskm, vha, 0x8030,
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x8030,
 		    "TM IOCB failed (%x).\n", rval);
 	}

@@ -221,6 +221,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
 	sp->fcport = fcport;
 	sp->iocbs = 1;
 	sp->vha = qpair->vha;
+	INIT_LIST_HEAD(&sp->elem);
+
 done:
 	if (!sp)
 		QLA_QPAIR_MARK_NOT_BUSY(qpair);

@@ -152,10 +152,15 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
 {
 	unsigned long flags;
 	int ret;
+	fc_port_t *fcport;
 
 	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		fcport->logout_on_delete = 0;
+
+	qla2x00_mark_all_devices_lost(vha, 0);
 
 	/* Remove port id from vp target map */
 	spin_lock_irqsave(&vha->hw->vport_slock, flags);

@@ -1136,7 +1136,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
  * qla2x00_wait_for_sess_deletion can only be called from remove_one.
  * it has dependency on UNLOADING flag to stop device discovery
  */
-static void
+void
 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
 	qla2x00_mark_all_devices_lost(vha, 0);
@@ -5794,8 +5794,9 @@ qla2x00_do_dpc(void *data)
 			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
 		}
 
-		if (test_and_clear_bit(ISP_ABORT_NEEDED,
-						&base_vha->dpc_flags)) {
+		if (test_and_clear_bit
+		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
 
 			ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
 			    "ISP abort scheduled.\n");

@@ -4280,6 +4280,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	struct extent_map *em;
 	u64 start = page_offset(page);
 	u64 end = start + PAGE_SIZE - 1;
+	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
 
 	if (gfpflags_allow_blocking(mask) &&
 	    page->mapping->host->i_size > SZ_16M) {
@@ -4302,6 +4303,8 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 					    extent_map_end(em) - 1,
 					    EXTENT_LOCKED | EXTENT_WRITEBACK,
 					    0, NULL)) {
+				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+					&btrfs_inode->runtime_flags);
 				remove_extent_mapping(map, em);
 				/* once for the rb tree */
 				free_extent_map(em);

@@ -2298,7 +2298,7 @@ static int ext4_check_descriptors(struct super_block *sb,
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
 	ext4_fsblk_t last_block;
-	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
+	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
 	ext4_fsblk_t block_bitmap;
 	ext4_fsblk_t inode_bitmap;
 	ext4_fsblk_t inode_table;
@@ -4035,13 +4035,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount2;
 		}
 	}
+	sbi->s_gdb_count = db_count;
 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		ret = -EFSCORRUPTED;
 		goto failed_mount2;
 	}
-	sbi->s_gdb_count = db_count;
 
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);

@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
 	if (size > PSIZE) {
 		/*
 		 * To keep the rest of the code simple.  Allocate a
-		 * contiguous buffer to work with
+		 * contiguous buffer to work with. Make the buffer large
+		 * enough to make use of the whole extent.
 		 */
-		ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+		ea_buf->max_size = (size + sb->s_blocksize - 1) &
+		    ~(sb->s_blocksize - 1);
+
+		ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
 		if (ea_buf->xattr == NULL)
 			return -ENOMEM;
 
 		ea_buf->flag = EA_MALLOC;
-		ea_buf->max_size = (size + sb->s_blocksize - 1) &
-		    ~(sb->s_blocksize - 1);
 
 		if (ea_size == 0)
 			return 0;
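
The jfs fix keys on a standard power-of-two round-up, so the buffer length and ea_buf->max_size now agree. A minimal standalone sketch of that arithmetic (the helper name is hypothetical, not jfs code):

/*
 * Sketch only: with a power-of-two block size bs,
 * (size + bs - 1) & ~(bs - 1) rounds size up to the next block boundary.
 */
#include <stdio.h>

static unsigned long round_up_pow2(unsigned long size, unsigned long bs)
{
	return (size + bs - 1) & ~(bs - 1); /* bs must be a power of two */
}

int main(void)
{
	printf("%lu\n", round_up_pow2(5000, 4096));  /* 8192 */
	printf("%lu\n", round_up_pow2(4096, 4096));  /* 4096 */
	return 0;
}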

@@ -785,9 +785,8 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
 	ASSERT(blkno == 0);
 	error = xfs_attr3_leaf_create(args, blkno, &bp);
 	if (error) {
-		error = xfs_da_shrink_inode(args, 0, bp);
-		bp = NULL;
-		if (error)
+		/* xfs_attr3_leaf_create may not have instantiated a block */
+		if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
 			goto out;
 		xfs_idata_realloc(dp, size, XFS_ATTR_FORK);	/* try to put */
 		memcpy(ifp->if_u1.if_data, tmpbuffer, size);	/* it back */

@@ -305,6 +305,46 @@ xfs_reinit_inode(
 	return error;
 }
 
+/*
+ * If we are allocating a new inode, then check what was returned is
+ * actually a free, empty inode. If we are not allocating an inode,
+ * then check we didn't find a free inode.
+ *
+ * Returns:
+ *	0		if the inode free state matches the lookup context
+ *	-ENOENT		if the inode is free and we are not allocating
+ *	-EFSCORRUPTED	if there is any state mismatch at all
+ */
+static int
+xfs_iget_check_free_state(
+	struct xfs_inode	*ip,
+	int			flags)
+{
+	if (flags & XFS_IGET_CREATE) {
+		/* should be a free inode */
+		if (VFS_I(ip)->i_mode != 0) {
+			xfs_warn(ip->i_mount,
+"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
+				ip->i_ino, VFS_I(ip)->i_mode);
+			return -EFSCORRUPTED;
+		}
+
+		if (ip->i_d.di_nblocks != 0) {
+			xfs_warn(ip->i_mount,
+"Corruption detected! Free inode 0x%llx has blocks allocated!",
+				ip->i_ino);
+			return -EFSCORRUPTED;
+		}
+		return 0;
+	}
+
+	/* should be an allocated inode */
+	if (VFS_I(ip)->i_mode == 0)
+		return -ENOENT;
+
+	return 0;
+}
+
 /*
  * Check the validity of the inode we just found it the cache
  */
@@ -354,12 +394,12 @@ xfs_iget_cache_hit(
 	}
 
 	/*
-	 * If lookup is racing with unlink return an error immediately.
+	 * Check the inode free state is valid. This also detects lookup
+	 * racing with unlinks.
 	 */
-	if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
-		error = -ENOENT;
+	error = xfs_iget_check_free_state(ip, flags);
+	if (error)
 		goto out_error;
-	}
 
 	/*
 	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
@@ -475,10 +515,14 @@ xfs_iget_cache_miss(
 
 	trace_xfs_iget_miss(ip);
 
-	if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
-		error = -ENOENT;
+	/*
+	 * Check the inode free state is valid. This also detects lookup
+	 * racing with unlinks.
+	 */
+	error = xfs_iget_check_free_state(ip, flags);
+	if (error)
 		goto out_destroy;
-	}
 
 	/*
 	 * Preload the radix tree so we can insert safely under the

@@ -160,6 +160,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);

@@ -1033,6 +1033,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
 	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
 		return 0;
 
+	/*
+	 * No further action required for interrupts which are requested as
+	 * threaded interrupts already
+	 */
+	if (new->handler == irq_default_primary_handler)
+		return 0;
+
 	new->flags |= IRQF_ONESHOT;
 
 	/*
@@ -1040,7 +1047,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
 	 * thread handler. We force thread them as well by creating a
 	 * secondary action.
 	 */
-	if (new->handler != irq_default_primary_handler && new->thread_fn) {
+	if (new->handler && new->thread_fn) {
 		/* Allocate the secondary action */
 		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 		if (!new->secondary)

@@ -390,7 +390,7 @@ static inline void tick_irq_exit(void)
 
 	/* Make sure that timer wheel updates are propagated */
 	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
-		if (!in_interrupt())
+		if (!in_irq())
 			tick_nohz_irq_exit();
 	}
 #endif

@@ -679,7 +679,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 
 static inline bool local_timer_softirq_pending(void)
 {
-	return local_softirq_pending() & TIMER_SOFTIRQ;
+	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}
 
 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
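
The tick-sched fix is subtle: softirq numbers are bit indexes into the pending mask, not masks themselves, so the old test checked the wrong bit. A standalone sketch (the enum values mirror the kernel's; everything else is illustrative):

/*
 * Sketch only: with TIMER_SOFTIRQ == 1, "pending & TIMER_SOFTIRQ"
 * actually tests bit 0 (HI_SOFTIRQ); BIT() builds the intended mask.
 */
#include <stdio.h>

#define BIT(n) (1U << (n))

enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1 };

int main(void)
{
	unsigned int pending = BIT(TIMER_SOFTIRQ); /* only the timer softirq raised */

	printf("buggy check: %u\n", pending & TIMER_SOFTIRQ);      /* 0: wrong bit */
	printf("fixed check: %u\n", pending & BIT(TIMER_SOFTIRQ)); /* 2: nonzero, correct */
	return 0;
}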

@@ -3109,6 +3109,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
 	return !atomic_read(&buffer->record_disabled);
 }
 
+/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to see if write is set enabled
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.

@@ -1367,6 +1367,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&tr->max_lock);
 
+	/* Inherit the recordable setting from trace_buffer */
+	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+		ring_buffer_record_on(tr->max_buffer.buffer);
+	else
+		ring_buffer_record_off(tr->max_buffer.buffer);
+
 	buf = tr->trace_buffer.buffer;
 	tr->trace_buffer.buffer = tr->max_buffer.buffer;
 	tr->max_buffer.buffer = buf;

@@ -981,8 +981,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
 	if (nlk->ngroups == 0)
 		groups = 0;
-	else
-		groups &= (1ULL << nlk->ngroups) - 1;
+	else if (nlk->ngroups < 8*sizeof(groups))
+		groups &= (1UL << nlk->ngroups) - 1;
 
 	bound = nlk->bound;
 	if (bound) {
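
The netlink guard exists because shifting a value by its full bit width is undefined behaviour in C, so the mask may only be applied while ngroups is strictly less than the width of groups. A standalone sketch of the guarded mask (values illustrative):

/*
 * Sketch only: when ngroups == 64, 1UL << 64 would be UB on a 64-bit
 * unsigned long, so the shift is skipped and groups is left as-is.
 */
#include <stdio.h>

int main(void)
{
	unsigned long groups = ~0UL;      /* request every group bit */
	unsigned int ngroups = 64;        /* e.g. a 64-group protocol */

	if (ngroups < 8 * sizeof(groups)) /* guard: shift stays in range */
		groups &= (1UL << ngroups) - 1;
	/* else: groups already fits, no mask (and no UB shift) needed */

	printf("groups mask: %#lx\n", groups);
	return 0;
}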
