/linux-4.1.27/arch/x86/kvm/

cpuid.c
  169  if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)  in kvm_vcpu_ioctl_set_cpuid()
  172  cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);  in kvm_vcpu_ioctl_set_cpuid()
  177  cpuid->nent * sizeof(struct kvm_cpuid_entry)))  in kvm_vcpu_ioctl_set_cpuid()
  179  for (i = 0; i < cpuid->nent; i++) {  in kvm_vcpu_ioctl_set_cpuid()
  191  vcpu->arch.cpuid_nent = cpuid->nent;  in kvm_vcpu_ioctl_set_cpuid()
  210  if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)  in kvm_vcpu_ioctl_set_cpuid2()
  214  cpuid->nent * sizeof(struct kvm_cpuid_entry2)))  in kvm_vcpu_ioctl_set_cpuid2()
  216  vcpu->arch.cpuid_nent = cpuid->nent;  in kvm_vcpu_ioctl_set_cpuid2()
  231  if (cpuid->nent < vcpu->arch.cpuid_nent)  in kvm_vcpu_ioctl_get_cpuid2()
  240  cpuid->nent = vcpu->arch.cpuid_nent;  in kvm_vcpu_ioctl_get_cpuid2()
  [all …]
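Note: the kvm_vcpu_ioctl_set_cpuid() hits show the standard treatment of a user-controlled element count: nent is bounded by KVM_MAX_CPUID_ENTRIES before it feeds the vmalloc() size computation and the copy_from_user() length, so the size multiplication cannot overflow. A minimal user-space sketch of the same pattern (MAX_ENTRIES and struct entry are illustrative stand-ins, not kernel definitions):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ENTRIES 80            /* illustrative stand-in for KVM_MAX_CPUID_ENTRIES */

    struct entry { unsigned function, eax, ebx, ecx, edx; };

    /* Bound a caller-supplied count before it reaches any size arithmetic,
     * mirroring the nent check in kvm_vcpu_ioctl_set_cpuid(). */
    static struct entry *alloc_entries(unsigned long nent)
    {
        if (nent > MAX_ENTRIES)       /* reject first: the multiply below cannot overflow */
            return NULL;
        return calloc(nent, sizeof(struct entry));
    }

    int main(void)
    {
        struct entry *ok = alloc_entries(4);
        printf("nent=4: %s\n", ok ? "accepted" : "rejected");
        printf("nent=1<<30: %s\n", alloc_entries(1ul << 30) ? "accepted" : "rejected");
        free(ok);
        return 0;
    }
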
/linux-4.1.27/scripts/

conmakehash.c
  85   int i, nuni, nent;  in main() local
  275  nent = 0;  in main()
  278  while ( nent >= unicount[fp0] )  in main()
  281  nent = 0;  in main()
  283  printf("0x%04x", unitable[fp0][nent++]);  in main()
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/

eq.c
  112  struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));  in next_eqe_sw()
  114  return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;  in next_eqe_sw()
  333  for (i = 0; i < eq->nent; i++) {  in init_eq_buf()
  340  int nent, u64 mask, const char *name, struct mlx5_uar *uar)  in mlx5_create_map_eq() argument
  348  eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);  in mlx5_create_map_eq()
  349  err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,  in mlx5_create_map_eq()
  367  in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);  in mlx5_create_map_eq()
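Note: next_eqe_sw() is the ownership-bit idiom for a power-of-two ring. cons_index & (nent - 1) picks the slot, and bit log2(nent) of the free-running cons_index flips on every lap, so an EQE belongs to software exactly when its owner bit matches that lap parity (mlx4's next_eqe_sw() below applies the same test to the 0x80 bit). A self-contained sketch of just the parity arithmetic, with a software producer standing in for the hardware:

    #include <stdio.h>
    #include <stdbool.h>

    #define NENT 8u   /* ring size; a power of two, as roundup_pow_of_two() guarantees */

    static unsigned owner[NENT];

    /* Producer (the hardware's role): entry n lands in slot n % NENT and is
     * stamped with the parity of the current lap, bit log2(NENT) of n. */
    static void produce(unsigned n)
    {
        owner[n & (NENT - 1)] = !!(n & NENT);
    }

    /* Consumer: the slot holds a fresh entry only if its owner bit matches
     * the parity of the consumer's own lap -- the test in next_eqe_sw(). */
    static bool sw_owned(unsigned cons_index)
    {
        return owner[cons_index & (NENT - 1)] == !!(cons_index & NENT);
    }

    int main(void)
    {
        for (unsigned i = 0; i < NENT; i++)
            owner[i] = 1;             /* init: every slot reads "not ready" on lap 0 */

        produce(0); produce(1); produce(2);

        for (unsigned ci = 0; ci < 5; ci++)
            printf("cons_index %u: %s\n", ci, sw_owned(ci) ? "consume" : "empty");
        return 0;
    }

Seeding every owner bit with 1 is what makes untouched lap-0 slots read as empty; the init_eq_buf() loop at line 333 plausibly serves the same purpose in the real driver.
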
main.c
  555  int nent;  in alloc_comp_eqs() local
  561  nent = MLX5_COMP_EQ_SIZE;  in alloc_comp_eqs()
  571  i + MLX5_EQ_VEC_COMP_BASE, nent, 0,  in alloc_comp_eqs()
/linux-4.1.27/drivers/infiniband/hw/mthca/

mthca_allocator.c
  160  int mthca_array_init(struct mthca_array *array, int nent)  in mthca_array_init() argument
  162  int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;  in mthca_array_init()
  177  void mthca_array_cleanup(struct mthca_array *array, int nent)  in mthca_array_cleanup() argument
  181  for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)  in mthca_array_cleanup()
mthca_eq.c
  184  mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),  in tavor_set_eq_ci()
  230  unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;  in get_eqe()
  466  int nent,  in mthca_create_eq() argument
  479  eq->nent = roundup_pow_of_two(max(nent, 2));  in mthca_create_eq()
  480  npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;  in mthca_create_eq()
  511  for (i = 0; i < eq->nent; ++i)  in mthca_create_eq()
  535  eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);  in mthca_create_eq()
  560  eq->eqn, eq->nent);  in mthca_create_eq()
  593  int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /  in mthca_free_eq()
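Note: mthca_create_eq() rounds the request up with roundup_pow_of_two(max(nent, 2)) and later recovers the log2 it must program into the EQ context as ffs(eq->nent) - 1, which works because a power of two has exactly one set bit; the same property makes x & (nent - 1) a cheap x % nent in get_eqe() and tavor_set_eq_ci(). A user-space illustration (roundup_pow2() is a local helper, not the kernel macro):

    #include <stdio.h>
    #include <strings.h>          /* ffs(), as used at mthca_eq.c line 535 */

    /* Local helper standing in for the kernel's roundup_pow_of_two(). */
    static unsigned roundup_pow2(unsigned x)
    {
        unsigned r = 1;
        while (r < x)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        unsigned requests[] = { 1, 3, 100, 1000 };
        for (int i = 0; i < 4; i++) {
            unsigned req  = requests[i];
            unsigned nent = roundup_pow2(req > 2 ? req : 2);   /* max(nent, 2) */
            /* ffs() is 1-based, so ffs(nent) - 1 is log2(nent) for a power of two. */
            printf("requested %4u -> nent %4u, log2 %2d, index mask 0x%03x\n",
                   req, nent, ffs((int)nent) - 1, nent - 1);
        }
        return 0;
    }
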
mthca_cq.c
  353  int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)  in mthca_alloc_cq_buf() argument
  358  ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,  in mthca_alloc_cq_buf()
  365  for (i = 0; i < nent; ++i)  in mthca_alloc_cq_buf()
  776  int mthca_init_cq(struct mthca_dev *dev, int nent,  in mthca_init_cq() argument
  784  cq->ibcq.cqe = nent - 1;  in mthca_init_cq()
  820  err = mthca_alloc_cq_buf(dev, &cq->buf, nent);  in mthca_init_cq()
  834  cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);  in mthca_init_cq()

mthca_dev.h
  422  int mthca_array_init(struct mthca_array *array, int nent);
  423  void mthca_array_cleanup(struct mthca_array *array, int nent);
  498  int mthca_init_cq(struct mthca_dev *dev, int nent,
  509  int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);

mthca_cmd.c
  662  int nent = 0;  in mthca_map_cmd() local
  692  pages[nent * 2] = cpu_to_be64(virt);  in mthca_map_cmd()
  696  pages[nent * 2 + 1] =  in mthca_map_cmd()
  702  if (++nent == MTHCA_MAILBOX_SIZE / 16) {  in mthca_map_cmd()
  703  err = mthca_cmd(dev, mailbox->dma, nent, 0, op,  in mthca_map_cmd()
  707  nent = 0;  in mthca_map_cmd()
  712  if (nent)  in mthca_map_cmd()
  713  err = mthca_cmd(dev, mailbox->dma, nent, 0, op,  in mthca_map_cmd()
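Note: mthca_map_cmd() (and its mlx4 twin mlx4_map_cmd() in fw.c below) is a textbook accumulate-and-flush loop: entries are packed into a fixed-size mailbox, the command fires each time nent reaches MTHCA_MAILBOX_SIZE / 16, and the trailing `if (nent)` at line 712 flushes the final partial batch. A minimal sketch of that control flow (issue_cmd() and CHUNK are illustrative stand-ins):

    #include <stdio.h>

    #define CHUNK 4   /* illustrative stand-in for MTHCA_MAILBOX_SIZE / 16 */

    /* Stand-in for the firmware command that consumes one mailbox. */
    static void issue_cmd(const int *mailbox, int nent)
    {
        printf("command with %d entries:", nent);
        for (int i = 0; i < nent; i++)
            printf(" %d", mailbox[i]);
        printf("\n");
    }

    int main(void)
    {
        int mailbox[CHUNK];
        int nent = 0;

        for (int page = 0; page < 10; page++) {
            mailbox[nent] = page;
            if (++nent == CHUNK) {    /* mailbox full: fire and start over */
                issue_cmd(mailbox, nent);
                nent = 0;
            }
        }
        if (nent)                     /* trailing partial batch, as at line 712 */
            issue_cmd(mailbox, nent);
        return 0;
    }
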
mthca_provider.h
  113  int nent;  member

mthca_provider.c
  651  int nent;  in mthca_create_cq() local
  686  for (nent = 1; nent <= entries; nent <<= 1)  in mthca_create_cq()
  689  err = mthca_init_cq(to_mdev(ibdev), nent,  in mthca_create_cq()
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/

eq.c
  110  unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;  in get_eqe()
  124  return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;  in next_eqe_sw()
  727  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
  730  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
  752  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
  754  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
  764  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
  767  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
  920  static int mlx4_create_eq(struct mlx4_dev *dev, int nent,  in mlx4_create_eq() argument
  934  eq->nent = roundup_pow_of_two(max(nent, 2));  in mlx4_create_eq()
  [all …]

cq.c
  283  int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,  in mlx4_cq_alloc() argument
  321  cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);  in mlx4_cq_alloc()

fw.c
  1346  int nent = 0;  in mlx4_map_cmd() local
  1376  pages[nent * 2] = cpu_to_be64(virt);  in mlx4_map_cmd()
  1380  pages[nent * 2 + 1] =  in mlx4_map_cmd()
  1386  if (++nent == MLX4_MAILBOX_SIZE / 16) {  in mlx4_map_cmd()
  1387  err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,  in mlx4_map_cmd()
  1392  nent = 0;  in mlx4_map_cmd()
  1397  if (nent)  in mlx4_map_cmd()
  1398  err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,  in mlx4_map_cmd()

main.c
  2196  int nent;  in mlx4_init_counters_table() local
  2201  nent = dev->caps.max_counters;  in mlx4_init_counters_table()
  2202  return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);  in mlx4_init_counters_table()

mlx4.h
  390  int nent;  member
/linux-4.1.27/drivers/iommu/

omap-iommu.c
  814  int nent = 1;  in iopgtable_clear_entry_core() local
  825  nent *= 16;  in iopgtable_clear_entry_core()
  829  bytes *= nent;  in iopgtable_clear_entry_core()
  830  memset(iopte, 0, nent * sizeof(*iopte));  in iopgtable_clear_entry_core()
  831  flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));  in iopgtable_clear_entry_core()
  842  nent = 1; /* for the next L1 entry */  in iopgtable_clear_entry_core()
  846  nent *= 16;  in iopgtable_clear_entry_core()
  850  bytes *= nent;  in iopgtable_clear_entry_core()
  852  memset(iopgd, 0, nent * sizeof(*iopgd));  in iopgtable_clear_entry_core()
  853  flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd));  in iopgtable_clear_entry_core()
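Note: iopgtable_clear_entry_core() bumps nent from 1 to 16 because, in the ARM short-descriptor format OMAP uses, a supersection (or, at second level, a large page) is encoded as 16 identical consecutive entries, so tearing one down means zeroing and flushing all 16. A toy model of that bookkeeping (the table, sizes and alignment handling are illustrative, not the driver's):

    #include <stdio.h>
    #include <string.h>

    static unsigned table[32];        /* toy first-level page table */

    enum pg_size { PG_SECTION, PG_SUPERSECTION };

    static void clear_entry(unsigned idx, enum pg_size sz)
    {
        int nent = 1;
        if (sz == PG_SUPERSECTION) {
            idx &= ~15u;              /* supersections sit on a 16-entry boundary */
            nent *= 16;               /* mirrors "nent *= 16" at line 825 */
        }
        memset(&table[idx], 0, nent * sizeof(*table));
        printf("cleared %2d entries starting at index %u\n", nent, idx);
    }

    int main(void)
    {
        clear_entry(5, PG_SECTION);        /* one entry */
        clear_entry(21, PG_SUPERSECTION);  /* sixteen entries, from index 16 */
        return 0;
    }
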
/linux-4.1.27/drivers/infiniband/hw/mlx5/

cq.c
  77   static u8 sw_ownership_bit(int n, int nent)  in sw_ownership_bit() argument
  79   return (n & nent) ? 1 : 0;  in sw_ownership_bit()
  589  int nent, int cqe_size)  in alloc_cq_buf() argument
  593  err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,  in alloc_cq_buf()
  599  buf->nent = nent;  in alloc_cq_buf()
  684  for (i = 0; i < buf->nent; i++) {  in init_cq_buf()
  1042 (i + 1) & (cq->resize_buf->nent),  in copy_resize_cqes()
  1045 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);  in copy_resize_cqes()

mlx5_ib.h
  241  int nent;  member
/linux-4.1.27/drivers/tty/serial/

pch_uart.c
  253  int nent;  member
  801  for (i = 0; i < priv->nent; i++, sg++) {  in pch_dma_tx_complete()
  807  dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE);  in pch_dma_tx_complete()
  809  priv->nent = 0;  in pch_dma_tx_complete()
  950  int nent;  in dma_handle_tx() local
  1029 nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE);  in dma_handle_tx()
  1030 if (!nent) {  in dma_handle_tx()
  1034 priv->nent = nent;  in dma_handle_tx()
  1036 for (i = 0; i < nent; i++, sg++) {  in dma_handle_tx()
  1041 if (i == (nent - 1))  in dma_handle_tx()
  [all …]

atmel_serial.c
  842  int ret, nent;  in atmel_prepare_tx_dma() local
  861  nent = dma_map_sg(port->dev,  in atmel_prepare_tx_dma()
  866  if (!nent) {  in atmel_prepare_tx_dma()
  1020 int ret, nent;  in atmel_prepare_rx_dma() local
  1041 nent = dma_map_sg(port->dev,  in atmel_prepare_rx_dma()
  1046 if (!nent) {  in atmel_prepare_rx_dma()

sh-sci.c
  1662 int nent;  in sci_request_dma() local
  1688 nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);  in sci_request_dma()
  1689 if (!nent)  in sci_request_dma()
  1697 s->sg_len_tx = nent;  in sci_request_dma()
/linux-4.1.27/drivers/infiniband/hw/qib/

qib_pcie.c
  275  int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,  in qib_pcie_params() argument
  290  if (nent && *nent && pos) {  in qib_pcie_params()
  291  qib_msix_setup(dd, pos, nent, entry);  in qib_pcie_params()
/linux-4.1.27/arch/x86/include/uapi/asm/

kvm.h
  201  __u32 nent;  member
  223  __u32 nent;  member
/linux-4.1.27/arch/alpha/kernel/

pci_iommu.c
  125  long i, p, nent;  in iommu_arena_find_pages() local
  140  nent = arena->size >> PAGE_SHIFT;  in iommu_arena_find_pages()
  145  while (i < n && p+i < nent) {  in iommu_arena_find_pages()
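Note: iommu_arena_find_pages() turns the arena size into an entry count (nent = arena->size >> PAGE_SHIFT) and then scans for n consecutive free slots below that bound. A simplified, runnable version of the window scan (the real function also honours an alignment mask and retries after flushing the TLB):

    #include <stdio.h>

    /* Find n consecutive free (zero) slots in a table of nent entries,
     * returning the start index or -1 on failure. */
    static long find_run(const int *table, long nent, long n)
    {
        long p = 0;
        while (p + n <= nent) {
            long i = 0;
            while (i < n && p + i < nent && table[p + i] == 0)
                i++;
            if (i == n)
                return p;
            p += i + 1;          /* skip past the busy slot that stopped us */
        }
        return -1;
    }

    int main(void)
    {
        int table[] = { 1, 0, 0, 1, 0, 0, 0, 1, 0, 0 };
        long nent = sizeof(table) / sizeof(table[0]);
        printf("run of 3 starts at %ld\n", find_run(table, nent, 3));  /* 4 */
        printf("run of 4 starts at %ld\n", find_run(table, nent, 4));  /* -1 */
        return 0;
    }
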
/linux-4.1.27/net/rds/

iw_send.c
  768  …s_iwdev, struct rds_iw_connection *ic, struct rds_iw_send_work *send, int nent, int len, u64 sg_ad…  in rds_iw_build_send_fastreg() argument
  770  BUG_ON(nent > send->s_page_list->max_page_list_len);  in rds_iw_build_send_fastreg()
  780  send->s_wr.wr.fast_reg.page_list_len = nent;  in rds_iw_build_send_fastreg()
/linux-4.1.27/drivers/spi/

spi-topcliff-pch.c
  125  int nent;  member
  803  dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,  in pch_spi_start_transfer()
  806  dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,  in pch_spi_start_transfer()
  1053 dma->nent = num;  in pch_spi_handle_dma()
  1112 dma->nent = num;  in pch_spi_handle_dma()
/linux-4.1.27/include/linux/mlx5/

driver.h
  352  int nent;  member
  724  int nent, u64 mask, const char *name, struct mlx5_uar *uar);
/linux-4.1.27/drivers/infiniband/hw/mlx4/

cq.c
  100  static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)  in mlx4_ib_alloc_cq_buf() argument
  104  err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,  in mlx4_ib_alloc_cq_buf()
  126  mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);  in mlx4_ib_alloc_cq_buf()
/linux-4.1.27/include/xen/interface/

platform.h
  253  uint32_t nent;  member
/linux-4.1.27/drivers/xen/

efi.c
  342  efi_systab_xen.nr_tables = info->cfg.nent;  in xen_efi_probe()
/linux-4.1.27/drivers/target/

target_core_transport.c
  2288 unsigned int nent;  in target_alloc_sgl() local
  2291 nent = DIV_ROUND_UP(length, PAGE_SIZE);  in target_alloc_sgl()
  2292 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);  in target_alloc_sgl()
  2296 sg_init_table(sg, nent);  in target_alloc_sgl()
  2309 *nents = nent;  in target_alloc_sgl()
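Note: target_alloc_sgl() sizes its scatterlist as one entry per (possibly partial) page, nent = DIV_ROUND_UP(length, PAGE_SIZE). The macro is ordinary ceiling division; here is its kernel definition dropped into a user-space demo:

    #include <stdio.h>

    /* Same definition as the kernel macro: ceiling division. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    #define PAGE_SIZE 4096u

    int main(void)
    {
        unsigned lengths[] = { 1, 4096, 4097, 12288 };
        for (int i = 0; i < 4; i++) {
            unsigned nent = DIV_ROUND_UP(lengths[i], PAGE_SIZE);
            printf("length %5u -> %u scatterlist entries\n", lengths[i], nent);
        }
        return 0;   /* prints 1, 1, 2, 3 entries respectively */
    }
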
/linux-4.1.27/arch/powerpc/xmon/

xmon.c
  2863 int nent, assoc, new_cc = 1;  in dump_tlb_book3e() local
  2882 nent = tlbcfg & 0xfff;  in dump_tlb_book3e()
  2884 for (i = 0; i < nent; i++) {  in dump_tlb_book3e()

/linux-4.1.27/drivers/scsi/pm8001/

pm80xx_hwi.h
  859  __le32 nent;  member
/linux-4.1.27/Documentation/virtual/kvm/

api.txt
  526  __u32 nent;
  1178 __u32 nent;
  1206 with the 'nent' field indicating the number of entries in the variable-size
  1209 the 'nent' field is adjusted and an error (ENOMEM) is returned. If the
  1210 number is just right, the 'nent' field is adjusted to the number of valid
  2699 __u32 nent;
  2726 structure with the 'nent' field indicating the number of entries in
  2729 number is too high, the 'nent' field is adjusted and an error (ENOMEM)
  2730 is returned. If the number is just right, the 'nent' field is adjusted
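Note: the api.txt excerpts describe nent as an in/out field: userspace passes the capacity of its entries array, the kernel reports a too-small buffer with an error (E2BIG for KVM_GET_SUPPORTED_CPUID), adjusts nent with ENOMEM when the count is too high, and writes back the number of valid entries on success. A hedged sketch of the usual grow-and-retry loop (error handling trimmed; assumes a Linux host with /dev/kvm):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0) { perror("/dev/kvm"); return 1; }

        unsigned n = 8;
        struct kvm_cpuid2 *cpuid;
        for (;;) {
            cpuid = calloc(1, sizeof(*cpuid) + n * sizeof(cpuid->entries[0]));
            cpuid->nent = n;                 /* in: capacity of entries[] */
            if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
                break;                       /* out: nent = valid entries */
            free(cpuid);
            if (errno != E2BIG) { perror("KVM_GET_SUPPORTED_CPUID"); return 1; }
            n *= 2;                          /* buffer too small: grow and retry */
        }
        printf("host supports %u cpuid entries\n", cpuid->nent);
        free(cpuid);
        return 0;
    }
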
/linux-4.1.27/include/linux/mlx4/

device.h
  1061 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,