mailweb.openeuler.org
Manage this list

Keyboard Shortcuts

Thread View

  • j: Next unread message
  • k: Previous unread message
  • j a: Jump to all threads
  • j l: Jump to MailingList overview

Kernel

Threads by month
  • ----- 2025 -----
  • May
  • April
  • March
  • February
  • January
  • ----- 2024 -----
  • December
  • November
  • October
  • September
  • August
  • July
  • June
  • May
  • April
  • March
  • February
  • January
  • ----- 2023 -----
  • December
  • November
  • October
  • September
  • August
  • July
  • June
  • May
  • April
  • March
  • February
  • January
  • ----- 2022 -----
  • December
  • November
  • October
  • September
  • August
  • July
  • June
  • May
  • April
  • March
  • February
  • January
  • ----- 2021 -----
  • December
  • November
  • October
  • September
  • August
  • July
  • June
  • May
  • April
  • March
  • February
  • January
  • ----- 2020 -----
  • December
  • November
  • October
  • September
  • August
  • July
  • June
  • May
  • April
  • March
  • February
  • January
  • ----- 2019 -----
  • December
kernel@openeuler.org

July 2021

  • 15 participants
  • 107 discussions
[PATCH kernel-4.19 1/4] xfs: let writable tracepoint enable to clear flag of f_mode
by Yang Yingliang 12 Jul '21

12 Jul '21
From: Yufen Yu <yuyufen(a)huawei.com> hulk inclusion category: feature bugzilla: 173267 CVE: NA --------------------------- Adding a new member clear_f_mode into struct xfs_writable_file, then we can clear some flag of file->f_mode. Signed-off-by: Yufen Yu <yuyufen(a)huawei.com> Signed-off-by: Zhihao Cheng <chengzhihao1(a)huawei.com> Reviewed-by: Hou Tao <houtao1(a)huawei.com> Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com> --- fs/xfs/xfs_file.c | 10 +++++++--- include/linux/fs.h | 9 ++++++--- include/uapi/linux/xfs.h | 4 +++- tools/include/uapi/linux/xfs.h | 2 +- 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ffc388c8b4523..bd8ae4df20042 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -35,6 +35,8 @@ #include <linux/mman.h> #include <linux/fadvise.h> +#define FMODE_MASK (FMODE_RANDOM | FMODE_WILLNEED | FMODE_SPC_READAHEAD) + static const struct vm_operations_struct xfs_file_vm_ops; int @@ -238,15 +240,17 @@ xfs_file_buffered_aio_read( struct xfs_writable_file file; file.name = file_dentry(filp)->d_name.name; + file.clear_f_mode = 0; file.f_mode = 0; file.i_size = file_inode(filp)->i_size; - file.prev_pos = filp->f_ra.prev_pos; + file.prev_pos = filp->f_ra.prev_pos >> PAGE_SHIFT; + file.pos = iocb->ki_pos >> PAGE_SHIFT; trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos); trace_xfs_file_read(&file, ip, iov_iter_count(to), iocb->ki_pos); - if (file.f_mode) - filp->f_mode |= file.f_mode; + filp->f_mode |= file.f_mode & FMODE_MASK; + filp->f_mode &= ~(file.clear_f_mode & FMODE_MASK); if (iocb->ki_flags & IOCB_NOWAIT) { if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) diff --git a/include/linux/fs.h b/include/linux/fs.h index f5bc43ac95035..394da46d143c2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -160,6 +160,12 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* File is stream-like */ #define FMODE_STREAM ((__force fmode_t)0x200000) +/* File will 
try to read head of the file into pagecache */ +#define FMODE_WILLNEED ((__force fmode_t)0x400000) + +/* File will do special readahead */ +#define FMODE_SPC_READAHEAD ((__force fmode_t)0x800000) + /* File was opened by fanotify and shouldn't generate fanotify events */ #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) @@ -169,9 +175,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* File does not contribute to nr_files count */ #define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) -/* File will try to read head of the file into pagecache */ -#define FMODE_WILLNEED ((__force fmode_t)0x40000000) - /* * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector * that indicates that they should check the contents of the iovec are diff --git a/include/uapi/linux/xfs.h b/include/uapi/linux/xfs.h index 635a83914273b..0a11c2344e5a3 100644 --- a/include/uapi/linux/xfs.h +++ b/include/uapi/linux/xfs.h @@ -7,9 +7,11 @@ struct xfs_writable_file { const unsigned char *name; + unsigned int clear_f_mode; /* can be cleared from file->f_mode */ unsigned int f_mode; /* can be set into file->f_mode */ long long i_size; /* file size */ - long long prev_pos; /* ra->prev_pos */ + long long prev_pos; /* ra->prev_pos page index */ + long long pos; /* iocb->ki_pos page index */ }; #endif /* _UAPI_LINUX_XFS_H */ diff --git a/tools/include/uapi/linux/xfs.h b/tools/include/uapi/linux/xfs.h index f333a2eb74074..2c4c61d5ba539 100644 --- a/tools/include/uapi/linux/xfs.h +++ b/tools/include/uapi/linux/xfs.h @@ -5,7 +5,7 @@ #include <linux/types.h> #define FMODE_RANDOM (0x1000) -#define FMODE_WILLNEED (0x40000000) +#define FMODE_WILLNEED (0x400000) struct xfs_writable_file { const unsigned char *name; -- 2.25.1
1 3
0 0
[PATCH openEuler-1.0-LTS] jbd2: fix kabi broken in struct journal_s
by Yang Yingliang 09 Jul '21

09 Jul '21
From: yangerkun <yangerkun(a)huawei.com> hulk inclusion category: bugfix bugzilla: 172974 CVE: NA --------------------------- 72c9e4df6a99 ('jbd2: ensure abort the journal if detect IO error when writing original buffer back') will add 'j_atomic_flags' which can lead lots of kabi broken like jbd2_journal_destroy/jbd2_journal_abort and so on. Fix it by add a wrapper. Signed-off-by: yangerkun <yangerkun(a)huawei.com> Reviewed-by: Zhang Yi <yi.zhang(a)huawei.com> Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com> --- fs/jbd2/checkpoint.c | 5 ++++- fs/jbd2/journal.c | 19 +++++++++++++------ include/linux/jbd2.h | 23 ++++++++++++++++++----- 3 files changed, 35 insertions(+), 12 deletions(-) diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index b1af15ad36dcb..f2c36c9c58be3 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -562,6 +562,7 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) struct transaction_chp_stats_s *stats; transaction_t *transaction; journal_t *journal; + journal_wrapper_t *journal_wrapper; struct buffer_head *bh = jh2bh(jh); JBUFFER_TRACE(jh, "entry"); @@ -572,6 +573,8 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) return 0; } journal = transaction->t_journal; + journal_wrapper = container_of(journal, journal_wrapper_t, + jw_journal); JBUFFER_TRACE(jh, "removing from transaction"); @@ -583,7 +586,7 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) * journal here and we abort the journal later from a better context. 
*/ if (buffer_write_io_error(bh)) - set_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags); + set_bit(JBD2_CHECKPOINT_IO_ERROR, &journal_wrapper->j_atomic_flags); __buffer_unlink(jh); jh->b_cp_transaction = NULL; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 89fad4c3e13cb..ef9a942fc9a1a 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1129,14 +1129,17 @@ static journal_t *journal_init_common(struct block_device *bdev, { static struct lock_class_key jbd2_trans_commit_key; journal_t *journal; + journal_wrapper_t *journal_wrapper; int err; struct buffer_head *bh; int n; - journal = kzalloc(sizeof(*journal), GFP_KERNEL); - if (!journal) + journal_wrapper = kzalloc(sizeof(*journal_wrapper), GFP_KERNEL); + if (!journal_wrapper) return NULL; + journal = &(journal_wrapper->jw_journal); + init_waitqueue_head(&journal->j_wait_transaction_locked); init_waitqueue_head(&journal->j_wait_done_commit); init_waitqueue_head(&journal->j_wait_commit); @@ -1195,7 +1198,7 @@ static journal_t *journal_init_common(struct block_device *bdev, err_cleanup: kfree(journal->j_wbuf); jbd2_journal_destroy_revoke(journal); - kfree(journal); + kfree(journal_wrapper); return NULL; } @@ -1425,11 +1428,13 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid, unsigned long tail_block, int write_op) { journal_superblock_t *sb = journal->j_superblock; + journal_wrapper_t *journal_wrapper = container_of(journal, + journal_wrapper_t, jw_journal); int ret; if (is_journal_aborted(journal)) return -EIO; - if (test_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags)) { + if (test_bit(JBD2_CHECKPOINT_IO_ERROR, &journal_wrapper->j_atomic_flags)) { jbd2_journal_abort(journal, -EIO); return -EIO; } @@ -1754,6 +1759,8 @@ int jbd2_journal_load(journal_t *journal) int jbd2_journal_destroy(journal_t *journal) { int err = 0; + journal_wrapper_t *journal_wrapper = container_of(journal, + journal_wrapper_t, jw_journal); /* Wait for the commit thread to wake up and die. 
*/ journal_kill_thread(journal); @@ -1795,7 +1802,7 @@ int jbd2_journal_destroy(journal_t *journal) * may become inconsistent. */ if (!is_journal_aborted(journal) && - test_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags)) + test_bit(JBD2_CHECKPOINT_IO_ERROR, &journal_wrapper->j_atomic_flags)) jbd2_journal_abort(journal, -EIO); if (journal->j_sb_buffer) { @@ -1823,7 +1830,7 @@ int jbd2_journal_destroy(journal_t *journal) if (journal->j_chksum_driver) crypto_free_shash(journal->j_chksum_driver); kfree(journal->j_wbuf); - kfree(journal); + kfree(journal_wrapper); return err; } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 5c0446f22bee1..ef213666c3a3b 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -105,6 +105,8 @@ typedef struct jbd2_journal_handle handle_t; /* Atomic operation type */ * This is an opaque datatype. **/ typedef struct journal_s journal_t; /* Journal control structure */ + +typedef struct journal_wrapper_s journal_wrapper_t; #endif /* @@ -780,11 +782,6 @@ struct journal_s */ unsigned long j_flags; - /** - * @j_atomic_flags: Atomic journaling state flags. - */ - unsigned long j_atomic_flags; - /** * @j_errno: * @@ -1199,6 +1196,22 @@ struct journal_s #endif }; +/** + * struct journal_wrapper_s - The wrapper of journal_s to fix KABI. + */ +struct journal_wrapper_s +{ + /** + * @jw_journal: real journal. + */ + journal_t jw_journal; + + /** + * @j_atomic_flags: Atomic journaling state flags. + */ + unsigned long j_atomic_flags; +}; + #define jbd2_might_wait_for_commit(j) \ do { \ rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \ -- 2.25.1
1 0
0 0
[PATCH OLK-5.10] x86/perf: Add uncore performance monitor support for Zhaoxin CPUs
by LeoLiuoc 09 Jul '21

09 Jul '21
Zhaoxin CPUs have already provided a performance monitoring unit on hardware for uncore, but this feature has not been used. Therefore, add support for Zhaoxin CPUs to make it available to monitor the uncore performance. Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com> ---  arch/x86/events/zhaoxin/Makefile |    1 +  arch/x86/events/zhaoxin/uncore.c | 1123 ++++++++++++++++++++++++++++++  arch/x86/events/zhaoxin/uncore.h |  311 +++++++++  3 files changed, 1435 insertions(+)  create mode 100644 arch/x86/events/zhaoxin/uncore.c  create mode 100644 arch/x86/events/zhaoxin/uncore.h diff --git a/arch/x86/events/zhaoxin/Makefile b/arch/x86/events/zhaoxin/Makefile index 642c1174d662..767d6212bac1 100644 --- a/arch/x86/events/zhaoxin/Makefile +++ b/arch/x86/events/zhaoxin/Makefile @@ -1,2 +1,3 @@  # SPDX-License-Identifier: GPL-2.0  obj-y    += core.o +obj-y    += uncore.o diff --git a/arch/x86/events/zhaoxin/uncore.c b/arch/x86/events/zhaoxin/uncore.c new file mode 100644 index 000000000000..96771063a61e --- /dev/null +++ b/arch/x86/events/zhaoxin/uncore.c @@ -0,0 +1,1123 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <asm/cpu_device_id.h> +#include "uncore.h" + +static struct zhaoxin_uncore_type *empty_uncore[] = { NULL, }; +static struct zhaoxin_uncore_type **uncore_msr_uncores = empty_uncore; + +/* mask of cpus that collect uncore events */ +static cpumask_t uncore_cpu_mask; + +/* constraint for the fixed counter */ +static struct event_constraint uncore_constraint_fixed = +    EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL); + +static int max_packages; + +/* CHX event control */ +#define CHX_UNC_CTL_EV_SEL_MASK            0x000000ff +#define CHX_UNC_CTL_UMASK_MASK            0x0000ff00 +#define CHX_UNC_CTL_EDGE_DET            (1 << 18) +#define CHX_UNC_CTL_EN                (1 << 22) +#define CHX_UNC_CTL_INVERT            (1 << 23) +#define CHX_UNC_CTL_CMASK_MASK            0xff000000 +#define CHX_UNC_FIXED_CTR_CTL_EN        (1 << 0) + +#define 
CHX_UNC_RAW_EVENT_MASK (CHX_UNC_CTL_EV_SEL_MASK | \ +                         CHX_UNC_CTL_UMASK_MASK | \ +                         CHX_UNC_CTL_EDGE_DET | \ +                         CHX_UNC_CTL_INVERT | \ +                         CHX_UNC_CTL_CMASK_MASK) + +/* CHX global control register */ +#define CHX_UNC_PERF_GLOBAL_CTL                 0x391 +#define CHX_UNC_FIXED_CTR                       0x394 +#define CHX_UNC_FIXED_CTR_CTRL                  0x395 + +/* CHX uncore global control */ +#define CHX_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 4) - 1) +#define CHX_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32) + +/* CHX uncore register */ +#define CHX_UNC_PERFEVTSEL0                     0x3c0 +#define CHX_UNC_UNCORE_PMC0                     0x3b0 + +DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); +DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31"); + +ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr,  char *buf) +{ +    struct uncore_event_desc *event = +        container_of(attr, struct uncore_event_desc, attr); +    return sprintf(buf, "%s", event->config); +} + +/*chx uncore support */ +static void chx_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ +    wrmsrl(event->hw.config_base, 0); +} + +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ +    u64 count; + +    rdmsrl(event->hw.event_base, count); + +    return count; +} + +static void chx_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ +    wrmsrl(CHX_UNC_PERF_GLOBAL_CTL, 0); +} + +static void chx_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ +    wrmsrl(CHX_UNC_PERF_GLOBAL_CTL, CHX_UNC_GLOBAL_CTL_EN_PC_ALL | CHX_UNC_GLOBAL_CTL_EN_FC); +} + +static void chx_uncore_msr_enable_event(struct 
zhaoxin_uncore_box *box, struct perf_event *event) +{ +    struct hw_perf_event *hwc = &event->hw; + +    if (hwc->idx < UNCORE_PMC_IDX_FIXED) +        wrmsrl(hwc->config_base, hwc->config | CHX_UNC_CTL_EN); +    else +        wrmsrl(hwc->config_base, CHX_UNC_FIXED_CTR_CTL_EN); +} + +static struct attribute *chx_uncore_formats_attr[] = { +    &format_attr_event.attr, +    &format_attr_umask.attr, +    &format_attr_edge.attr, +    &format_attr_inv.attr, +    &format_attr_cmask8.attr, +    NULL, +}; + +static struct attribute_group chx_uncore_format_group = { +    .name = "format", +    .attrs = chx_uncore_formats_attr, +}; + +static struct uncore_event_desc chx_uncore_events[] = { +    { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops chx_uncore_msr_ops = { +    .disable_box    = chx_uncore_msr_disable_box, +    .enable_box    = chx_uncore_msr_enable_box, +    .disable_event    = chx_uncore_msr_disable_event, +    .enable_event    = chx_uncore_msr_enable_event, +    .read_counter    = uncore_msr_read_counter, +}; + +static struct zhaoxin_uncore_type chx_uncore_box = { +    .name        = "", +    .num_counters   = 4, +    .num_boxes    = 1, +    .perf_ctr_bits    = 48, +    .fixed_ctr_bits    = 48, +    .event_ctl    = CHX_UNC_PERFEVTSEL0, +    .perf_ctr    = CHX_UNC_UNCORE_PMC0, +    .fixed_ctr    = CHX_UNC_FIXED_CTR, +    .fixed_ctl    = CHX_UNC_FIXED_CTR_CTRL, +    .event_mask    = CHX_UNC_RAW_EVENT_MASK, +    .event_descs    = chx_uncore_events, +    .ops        = &chx_uncore_msr_ops, +    .format_group    = &chx_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *chx_msr_uncores[] = { +    &chx_uncore_box, +    NULL, +}; + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu) +{ +    unsigned int package_id = topology_logical_package_id(cpu); + +    /* +     * The unsigned check also catches the '-1' return value for non +     * existent mappings in the topology map. 
+     */ +    return package_id < max_packages ? pmu->boxes[package_id] : NULL; +} + +static void uncore_assign_hw_event(struct zhaoxin_uncore_box *box, +                   struct perf_event *event, int idx) +{ +    struct hw_perf_event *hwc = &event->hw; + +    hwc->idx = idx; +    hwc->last_tag = ++box->tags[idx]; + +    if (uncore_pmc_fixed(hwc->idx)) { +        hwc->event_base = uncore_fixed_ctr(box); +        hwc->config_base = uncore_fixed_ctl(box); +        return; +    } + +    hwc->config_base = uncore_event_ctl(box, hwc->idx); +    hwc->event_base  = uncore_perf_ctr(box, hwc->idx); +} + +void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ +    u64 prev_count, new_count, delta; +    int shift; + +    if (uncore_pmc_fixed(event->hw.idx)) +        shift = 64 - uncore_fixed_ctr_bits(box); +    else +        shift = 64 - uncore_perf_ctr_bits(box); + +    /* the hrtimer might modify the previous event value */ +again: +    prev_count = local64_read(&event->hw.prev_count); +    new_count = uncore_read_counter(box, event); +    if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) +        goto again; + +    delta = (new_count << shift) - (prev_count << shift); +    delta >>= shift; + +    local64_add(delta, &event->count); +} + +static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) +{ +    struct zhaoxin_uncore_box *box; +    struct perf_event *event; +    unsigned long flags; +    int bit; + +    box = container_of(hrtimer, struct zhaoxin_uncore_box, hrtimer); +    if (!box->n_active || box->cpu != smp_processor_id()) +        return HRTIMER_NORESTART; +    /* +     * disable local interrupt to prevent uncore_pmu_event_start/stop +     * to interrupt the update process +     */ +    local_irq_save(flags); + +    /* +     * handle boxes with an active event list as opposed to active +     * counters +     */ +    list_for_each_entry(event, &box->active_list, active_entry) { +        
uncore_perf_event_update(box, event); +    } + +    for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) +        uncore_perf_event_update(box, box->events[bit]); + +    local_irq_restore(flags); + +    hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration)); +    return HRTIMER_RESTART; +} + +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box) +{ +    hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), +              HRTIMER_MODE_REL_PINNED); +} + +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box) +{ +    hrtimer_cancel(&box->hrtimer); +} + +static void uncore_pmu_init_hrtimer(struct zhaoxin_uncore_box *box) +{ +    hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); +    box->hrtimer.function = uncore_pmu_hrtimer; +} + +static struct zhaoxin_uncore_box *uncore_alloc_box(struct zhaoxin_uncore_type *type, +                    int node) +{ +    int i, size, numshared = type->num_shared_regs; +    struct zhaoxin_uncore_box *box; + +    size = sizeof(*box) + numshared * sizeof(struct zhaoxin_uncore_extra_reg); + +    box = kzalloc_node(size, GFP_KERNEL, node); +    if (!box) +        return NULL; + +    for (i = 0; i < numshared; i++) +        raw_spin_lock_init(&box->shared_regs[i].lock); + +    uncore_pmu_init_hrtimer(box); +    box->cpu = -1; +    box->package_id = -1; + +    /* set default hrtimer timeout */ +    box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; + +    INIT_LIST_HEAD(&box->active_list); + +    return box; +} + +static bool is_box_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ +    return &box->pmu->pmu == event->pmu; +} + +static struct event_constraint * +uncore_get_event_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ +    struct zhaoxin_uncore_type *type = box->pmu->type; +    struct event_constraint *c; + +    if (type->ops->get_constraint) { +        c = type->ops->get_constraint(box, event); +        if (c) +           
 return c; +    } + +    if (event->attr.config == UNCORE_FIXED_EVENT) +        return &uncore_constraint_fixed; + +    if (type->constraints) { +        for_each_event_constraint(c, type->constraints) { +            if ((event->hw.config & c->cmask) == c->code) +                return c; +        } +    } + +    return &type->unconstrainted; +} + +static void uncore_put_event_constraint(struct zhaoxin_uncore_box *box, +                    struct perf_event *event) +{ +    if (box->pmu->type->ops->put_constraint) +        box->pmu->type->ops->put_constraint(box, event); +} + +static int uncore_assign_events(struct zhaoxin_uncore_box *box, int assign[], int n) +{ +    unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; +    struct event_constraint *c; +    int i, wmin, wmax, ret = 0; +    struct hw_perf_event *hwc; + +    bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); + +    for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { +        c = uncore_get_event_constraint(box, box->event_list[i]); +        box->event_constraint[i] = c; +        wmin = min(wmin, c->weight); +        wmax = max(wmax, c->weight); +    } + +    /* fastpath, try to reuse previous register */ +    for (i = 0; i < n; i++) { +        hwc = &box->event_list[i]->hw; +        c = box->event_constraint[i]; + +        /* never assigned */ +        if (hwc->idx == -1) +            break; + +        /* constraint still honored */ +        if (!test_bit(hwc->idx, c->idxmsk)) +            break; + +        /* not already used */ +        if (test_bit(hwc->idx, used_mask)) +            break; + +        __set_bit(hwc->idx, used_mask); +        if (assign) +            assign[i] = hwc->idx; +    } +    /* slow path */ +    if (i != n) +        ret = perf_assign_events(box->event_constraint, n, +                     wmin, wmax, n, assign); + +    if (!assign || ret) { +        for (i = 0; i < n; i++) +            uncore_put_event_constraint(box, box->event_list[i]); +    } +    return ret ? 
-EINVAL : 0; +} + +static void uncore_pmu_event_start(struct perf_event *event, int flags) +{ +    struct zhaoxin_uncore_box *box = uncore_event_to_box(event); +    int idx = event->hw.idx; + + +    if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX)) +        return; + +    if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) +        return; + +    event->hw.state = 0; +    box->events[idx] = event; +    box->n_active++; +    __set_bit(idx, box->active_mask); + +    local64_set(&event->hw.prev_count, uncore_read_counter(box, event)); +    uncore_enable_event(box, event); + +    if (box->n_active == 1) +        uncore_pmu_start_hrtimer(box); +} + +static void uncore_pmu_event_stop(struct perf_event *event, int flags) +{ +    struct zhaoxin_uncore_box *box = uncore_event_to_box(event); +    struct hw_perf_event *hwc = &event->hw; + +    if (__test_and_clear_bit(hwc->idx, box->active_mask)) { +        uncore_disable_event(box, event); +        box->n_active--; +        box->events[hwc->idx] = NULL; +        WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); +        hwc->state |= PERF_HES_STOPPED; + +        if (box->n_active == 0) +            uncore_pmu_cancel_hrtimer(box); +    } + +    if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { +        /* +         * Drain the remaining delta count out of a event +         * that we are disabling: +         */ +        uncore_perf_event_update(box, event); +        hwc->state |= PERF_HES_UPTODATE; +    } +} + +static int +uncore_collect_events(struct zhaoxin_uncore_box *box, struct perf_event *leader, +              bool dogrp) +{ +    struct perf_event *event; +    int n, max_count; + +    max_count = box->pmu->type->num_counters; +    if (box->pmu->type->fixed_ctl) +        max_count++; + +    if (box->n_events >= max_count) +        return -EINVAL; + +    n = box->n_events; + +    if (is_box_event(box, leader)) { +        box->event_list[n] = leader; +        n++; +    } + +    if (!dogrp) +      
  return n; + +    for_each_sibling_event(event, leader) { +        if (!is_box_event(box, event) || +            event->state <= PERF_EVENT_STATE_OFF) +            continue; + +        if (n >= max_count) +            return -EINVAL; + +        box->event_list[n] = event; +        n++; +    } +    return n; +} + +static int uncore_pmu_event_add(struct perf_event *event, int flags) +{ +    struct zhaoxin_uncore_box *box = uncore_event_to_box(event); +    struct hw_perf_event *hwc = &event->hw; +    int assign[UNCORE_PMC_IDX_MAX]; +    int i, n, ret; + +    if (!box) +        return -ENODEV; + +    ret = n = uncore_collect_events(box, event, false); +    if (ret < 0) +        return ret; + +    hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; +    if (!(flags & PERF_EF_START)) +        hwc->state |= PERF_HES_ARCH; + +    ret = uncore_assign_events(box, assign, n); +    if (ret) +        return ret; + +    /* save events moving to new counters */ +    for (i = 0; i < box->n_events; i++) { +        event = box->event_list[i]; +        hwc = &event->hw; + +        if (hwc->idx == assign[i] && +            hwc->last_tag == box->tags[assign[i]]) +            continue; +        /* +         * Ensure we don't accidentally enable a stopped +         * counter simply because we rescheduled. 
+         */ +        if (hwc->state & PERF_HES_STOPPED) +            hwc->state |= PERF_HES_ARCH; + +        uncore_pmu_event_stop(event, PERF_EF_UPDATE); +    } + +    /* reprogram moved events into new counters */ +    for (i = 0; i < n; i++) { +        event = box->event_list[i]; +        hwc = &event->hw; + +        if (hwc->idx != assign[i] || +            hwc->last_tag != box->tags[assign[i]]) +            uncore_assign_hw_event(box, event, assign[i]); +        else if (i < box->n_events) +            continue; + +        if (hwc->state & PERF_HES_ARCH) +            continue; + +        uncore_pmu_event_start(event, 0); +    } +    box->n_events = n; + +    return 0; +} + +static int uncore_validate_group(struct zhaoxin_uncore_pmu *pmu, +                struct perf_event *event) +{ +    struct perf_event *leader = event->group_leader; +    struct zhaoxin_uncore_box *fake_box; +    int ret = -EINVAL, n; + +    fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE); +    if (!fake_box) +        return -ENOMEM; + +    fake_box->pmu = pmu; +    /* +     * the event is not yet connected with its +     * siblings therefore we must first collect +     * existing siblings, then add the new event +     * before we can simulate the scheduling +     */ +    n = uncore_collect_events(fake_box, leader, true); +    if (n < 0) +        goto out; + +    fake_box->n_events = n; +    n = uncore_collect_events(fake_box, event, false); +    if (n < 0) +        goto out; + +    fake_box->n_events = n; + +    ret = uncore_assign_events(fake_box, NULL, n); +out: +    kfree(fake_box); +    return ret; +} + +static void uncore_pmu_event_del(struct perf_event *event, int flags) +{ +    struct zhaoxin_uncore_box *box = uncore_event_to_box(event); +    int i; + +    uncore_pmu_event_stop(event, PERF_EF_UPDATE); + +    for (i = 0; i < box->n_events; i++) { +        if (event == box->event_list[i]) { +            uncore_put_event_constraint(box, event); + +            for (++i; i < 
box->n_events; i++) +                box->event_list[i - 1] = box->event_list[i]; + +            --box->n_events; +            break; +        } +    } + +    event->hw.idx = -1; +    event->hw.last_tag = ~0ULL; +} + +static void uncore_pmu_event_read(struct perf_event *event) +{ +    struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + +    uncore_perf_event_update(box, event); +} + +static int uncore_pmu_event_init(struct perf_event *event) +{ +    struct zhaoxin_uncore_pmu *pmu; +    struct zhaoxin_uncore_box *box; +    struct hw_perf_event *hwc = &event->hw; +    int ret; + +    if (event->attr.type != event->pmu->type) +        return -ENOENT; + +    pmu = uncore_event_to_pmu(event); +    /* no device found for this pmu */ +    if (pmu->func_id < 0) +        return -ENOENT; + +    /* Sampling not supported yet */ +    if (hwc->sample_period) +        return -EINVAL; + +    /* +     * Place all uncore events for a particular physical package +     * onto a single cpu +     */ +    if (event->cpu < 0) +        return -EINVAL; +    box = uncore_pmu_to_box(pmu, event->cpu); +    if (!box || box->cpu < 0) +        return -EINVAL; +    event->cpu = box->cpu; +    event->pmu_private = box; + +    event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; + +    event->hw.idx = -1; +    event->hw.last_tag = ~0ULL; +    event->hw.extra_reg.idx = EXTRA_REG_NONE; +    event->hw.branch_reg.idx = EXTRA_REG_NONE; + +    if (event->attr.config == UNCORE_FIXED_EVENT) { +        /* no fixed counter */ +        if (!pmu->type->fixed_ctl) +            return -EINVAL; +        /* +         * if there is only one fixed counter, only the first pmu +         * can access the fixed counter +         */ +        if (pmu->type->single_fixed && pmu->pmu_idx > 0) +            return -EINVAL; + +        /* fixed counters have event field hardcoded to zero */ +        hwc->config = 0ULL; +    } else { +        hwc->config = event->attr.config & +                  (pmu->type->event_mask | 
((u64)pmu->type->event_mask_ext << 32)); +        if (pmu->type->ops->hw_config) { +            ret = pmu->type->ops->hw_config(box, event); +            if (ret) +                return ret; +        } +    } + +    if (event->group_leader != event) +        ret = uncore_validate_group(pmu, event); +    else +        ret = 0; + +    return ret; +} + +static void uncore_pmu_enable(struct pmu *pmu) +{ +    struct zhaoxin_uncore_pmu *uncore_pmu; +    struct zhaoxin_uncore_box *box; + +    uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); +    if (!uncore_pmu) +        return; + +    box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); +    if (!box) +        return; + +    if (uncore_pmu->type->ops->enable_box) +        uncore_pmu->type->ops->enable_box(box); +} + +static void uncore_pmu_disable(struct pmu *pmu) +{ +    struct zhaoxin_uncore_pmu *uncore_pmu; +    struct zhaoxin_uncore_box *box; + +    uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); +    if (!uncore_pmu) +        return; + +    box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); +    if (!box) +        return; + +    if (uncore_pmu->type->ops->disable_box) +        uncore_pmu->type->ops->disable_box(box); +} + +static ssize_t uncore_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) +{ +    return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask); +} + +static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL); + +static struct attribute *uncore_pmu_attrs[] = { +    &dev_attr_cpumask.attr, +    NULL, +}; + +static const struct attribute_group uncore_pmu_attr_group = { +    .attrs = uncore_pmu_attrs, +}; + +static void uncore_pmu_unregister(struct zhaoxin_uncore_pmu *pmu) +{ +    if (!pmu->registered) +        return; +    perf_pmu_unregister(&pmu->pmu); +    pmu->registered = false; +} + +static void uncore_free_boxes(struct zhaoxin_uncore_pmu *pmu) +{ +    int package; + +    for (package = 0; package < max_packages; 
package++) +        kfree(pmu->boxes[package]); +    kfree(pmu->boxes); +} + +static void uncore_type_exit(struct zhaoxin_uncore_type *type) +{ +    struct zhaoxin_uncore_pmu *pmu = type->pmus; +    int i; + +    if (pmu) { +        for (i = 0; i < type->num_boxes; i++, pmu++) { +            uncore_pmu_unregister(pmu); +            uncore_free_boxes(pmu); +        } +        kfree(type->pmus); +        type->pmus = NULL; +    } +    kfree(type->events_group); +    type->events_group = NULL; +} + +static void uncore_types_exit(struct zhaoxin_uncore_type **types) +{ +    for (; *types; types++) +        uncore_type_exit(*types); +} + +static int __init uncore_type_init(struct zhaoxin_uncore_type *type, bool setid) +{ +    struct zhaoxin_uncore_pmu *pmus; +    size_t size; +    int i, j; + +    pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL); +    if (!pmus) +        return -ENOMEM; + +    size = max_packages*sizeof(struct zhaoxin_uncore_box *); + +    for (i = 0; i < type->num_boxes; i++) { +        pmus[i].func_id    = setid ? 
i : -1; +        pmus[i].pmu_idx    = i; +        pmus[i].type    = type; +        pmus[i].boxes    = kzalloc(size, GFP_KERNEL); +        if (!pmus[i].boxes) +            goto err; +    } + +    type->pmus = pmus; +    type->unconstrainted = (struct event_constraint) +        __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, +                0, type->num_counters, 0, 0); + +    if (type->event_descs) { +        struct { +            struct attribute_group group; +            struct attribute *attrs[]; +        } *attr_group; +        for (i = 0; type->event_descs[i].attr.attr.name; i++) +            ; + +        attr_group = kzalloc(struct_size(attr_group, attrs, i + 1), GFP_KERNEL); +        if (!attr_group) +            goto err; + +        attr_group->group.name = "events"; +        attr_group->group.attrs = attr_group->attrs; + +        for (j = 0; j < i; j++) +            attr_group->attrs[j] = &type->event_descs[j].attr.attr; + +        type->events_group = &attr_group->group; +    } + +    type->pmu_group = &uncore_pmu_attr_group; + +    return 0; + +err: +    for (i = 0; i < type->num_boxes; i++) +        kfree(pmus[i].boxes); +    kfree(pmus); + +    return -ENOMEM; +} + +static int __init +uncore_types_init(struct zhaoxin_uncore_type **types, bool setid) +{ +    int ret; + +    for (; *types; types++) { +        ret = uncore_type_init(*types, setid); +        if (ret) +            return ret; +    } +    return 0; +} + +static void uncore_change_type_ctx(struct zhaoxin_uncore_type *type, int old_cpu, +                   int new_cpu) +{ +    struct zhaoxin_uncore_pmu *pmu = type->pmus; +    struct zhaoxin_uncore_box *box; +    int i, package; + +    package = topology_logical_package_id(old_cpu < 0 ? 
new_cpu : old_cpu); +    for (i = 0; i < type->num_boxes; i++, pmu++) { +        box = pmu->boxes[package]; +        if (!box) +            continue; + +        if (old_cpu < 0) { +            WARN_ON_ONCE(box->cpu != -1); +            box->cpu = new_cpu; +            continue; +        } + +        WARN_ON_ONCE(box->cpu != old_cpu); +        box->cpu = -1; +        if (new_cpu < 0) +            continue; + +        uncore_pmu_cancel_hrtimer(box); +        perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu); +        box->cpu = new_cpu; +    } +} + +static void uncore_change_context(struct zhaoxin_uncore_type **uncores, +                  int old_cpu, int new_cpu) +{ +    for (; *uncores; uncores++) +        uncore_change_type_ctx(*uncores, old_cpu, new_cpu); +} + +static void uncore_box_unref(struct zhaoxin_uncore_type **types, int id) +{ +    struct zhaoxin_uncore_type *type; +    struct zhaoxin_uncore_pmu *pmu; +    struct zhaoxin_uncore_box *box; +    int i; + +    for (; *types; types++) { +        type = *types; +        pmu = type->pmus; +        for (i = 0; i < type->num_boxes; i++, pmu++) { +            box = pmu->boxes[id]; +            if (box && atomic_dec_return(&box->refcnt) == 0) +                uncore_box_exit(box); +        } +    } +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ +    int package, target; + +    /* Check if exiting cpu is used for collecting uncore events */ +    if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) +        goto unref; +    /* Find a new cpu to collect uncore events */ +    target = cpumask_any_but(topology_core_cpumask(cpu), cpu); + +    /* Migrate uncore events to the new target */ +    if (target < nr_cpu_ids) +        cpumask_set_cpu(target, &uncore_cpu_mask); +    else +        target = -1; + +    uncore_change_context(uncore_msr_uncores, cpu, target); + +unref: +    /* Clear the references */ +    package = topology_logical_package_id(cpu); +    uncore_box_unref(uncore_msr_uncores, 
package); +    return 0; +} + +static int allocate_boxes(struct zhaoxin_uncore_type **types, +             unsigned int package, unsigned int cpu) +{ +    struct zhaoxin_uncore_box *box, *tmp; +    struct zhaoxin_uncore_type *type; +    struct zhaoxin_uncore_pmu *pmu; +    LIST_HEAD(allocated); +    int i; + +    /* Try to allocate all required boxes */ +    for (; *types; types++) { +        type = *types; +        pmu = type->pmus; +        for (i = 0; i < type->num_boxes; i++, pmu++) { +            if (pmu->boxes[package]) +                continue; +            box = uncore_alloc_box(type, cpu_to_node(cpu)); +            if (!box) +                goto cleanup; +            box->pmu = pmu; +            box->package_id = package; +            list_add(&box->active_list, &allocated); +        } +    } +    /* Install them in the pmus */ +    list_for_each_entry_safe(box, tmp, &allocated, active_list) { +        list_del_init(&box->active_list); +        box->pmu->boxes[package] = box; +    } +    return 0; + +cleanup: +    list_for_each_entry_safe(box, tmp, &allocated, active_list) { +        list_del_init(&box->active_list); +        kfree(box); +    } +    return -ENOMEM; +} + +static int uncore_box_ref(struct zhaoxin_uncore_type **types, +              int id, unsigned int cpu) +{ +    struct zhaoxin_uncore_type *type; +    struct zhaoxin_uncore_pmu *pmu; +    struct zhaoxin_uncore_box *box; +    int i, ret; + +    ret = allocate_boxes(types, id, cpu); +    if (ret) +        return ret; + +    for (; *types; types++) { +        type = *types; +        pmu = type->pmus; +        for (i = 0; i < type->num_boxes; i++, pmu++) { +            box = pmu->boxes[id]; +            if (box && atomic_inc_return(&box->refcnt) == 1) +                uncore_box_init(box); +        } +    } +    return 0; +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ +    int package, target, msr_ret; + +    package = topology_logical_package_id(cpu); +    msr_ret = 
uncore_box_ref(uncore_msr_uncores, package, cpu); + +    if (msr_ret) +        return -ENOMEM; + +    /* +     * Check if there is an online cpu in the package +     * which collects uncore events already. +     */ +    target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu)); +    if (target < nr_cpu_ids) +        return 0; + +    cpumask_set_cpu(cpu, &uncore_cpu_mask); + +    if (!msr_ret) +        uncore_change_context(uncore_msr_uncores, -1, cpu); + +    return 0; +} + +static int uncore_pmu_register(struct zhaoxin_uncore_pmu *pmu) +{ +    int ret; + +    if (!pmu->type->pmu) { +        pmu->pmu = (struct pmu) { +            .attr_groups    = pmu->type->attr_groups, +            .task_ctx_nr    = perf_invalid_context, +            .pmu_enable    = uncore_pmu_enable, +            .pmu_disable    = uncore_pmu_disable, +            .event_init    = uncore_pmu_event_init, +            .add        = uncore_pmu_event_add, +            .del        = uncore_pmu_event_del, +            .start        = uncore_pmu_event_start, +            .stop        = uncore_pmu_event_stop, +            .read        = uncore_pmu_event_read, +            .module        = THIS_MODULE, +            .capabilities    = PERF_PMU_CAP_NO_EXCLUDE, +        }; +    } else { +        pmu->pmu = *pmu->type->pmu; +        pmu->pmu.attr_groups = pmu->type->attr_groups; +    } + +    if (pmu->type->num_boxes == 1) { +        if (strlen(pmu->type->name) > 0) +            sprintf(pmu->name, "uncore_%s", pmu->type->name); +        else +            sprintf(pmu->name, "uncore"); +    } else { +        sprintf(pmu->name, "uncore_%s_%d", pmu->type->name, +            pmu->pmu_idx); +    } + +    ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); +    if (!ret) +        pmu->registered = true; +    return ret; +} + +static int __init type_pmu_register(struct zhaoxin_uncore_type *type) +{ +    int i, ret; + +    for (i = 0; i < type->num_boxes; i++) { +        ret = 
uncore_pmu_register(&type->pmus[i]); +        if (ret) +            return ret; +    } +    return 0; +} + +static int __init uncore_msr_pmus_register(void) +{ +    struct zhaoxin_uncore_type **types = uncore_msr_uncores; +    int ret; + +    for (; *types; types++) { +        ret = type_pmu_register(*types); +        if (ret) +            return ret; +    } +    return 0; +} + +static int __init uncore_cpu_init(void) +{ +    int ret; + +    ret = uncore_types_init(uncore_msr_uncores, true); +    if (ret) +        goto err; + +    ret = uncore_msr_pmus_register(); +    if (ret) +        goto err; +    return 0; +err: +    uncore_types_exit(uncore_msr_uncores); +    uncore_msr_uncores = empty_uncore; +    return ret; +} + +struct zhaoxin_uncore_init_fun { +    void    (*cpu_init)(void); +}; + +void chx_uncore_cpu_init(void) +{ +    uncore_msr_uncores = chx_msr_uncores; +} + +static const struct zhaoxin_uncore_init_fun chx_uncore_init __initconst = { +    .cpu_init = chx_uncore_cpu_init, +}; + +static const struct x86_cpu_id zhaoxin_uncore_match[] __initconst = { +    X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_ZXD, &chx_uncore_init), +    X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_ZXE, &chx_uncore_init), +    X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_ZXD, &chx_uncore_init), +    X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_ZXE, &chx_uncore_init), +    {}, +}; + +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_uncore_match); + +static int __init zhaoxin_uncore_init(void) +{ +    const struct x86_cpu_id *id; +    struct zhaoxin_uncore_init_fun *uncore_init; +    int cret = 0, ret; + +    id = x86_match_cpu(zhaoxin_uncore_match); + +    if (!id) +        return -ENODEV; + +    if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) +        return -ENODEV; + +    max_packages = topology_max_packages(); + +    pr_info("welcome to uncore!\n"); + +    uncore_init = (struct zhaoxin_uncore_init_fun *)id->driver_data; + +    if (uncore_init->cpu_init) { +        
uncore_init->cpu_init(); +        cret = uncore_cpu_init(); +    } + +    if (cret) +        return -ENODEV; + +    ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, +                "perf/x86/zhaoxin/uncore:online", +                uncore_event_cpu_online, +                uncore_event_cpu_offline); +    pr_info("zhaoxin uncore init success!\n"); +    if (ret) +        goto err; +    return 0; + +err: +    uncore_types_exit(uncore_msr_uncores); +    return ret; +} +module_init(zhaoxin_uncore_init); + +static void __exit zhaoxin_uncore_exit(void) +{ +    cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE); +    uncore_types_exit(uncore_msr_uncores); +} +module_exit(zhaoxin_uncore_exit); diff --git a/arch/x86/events/zhaoxin/uncore.h b/arch/x86/events/zhaoxin/uncore.h new file mode 100644 index 000000000000..e0f4ec340725 --- /dev/null +++ b/arch/x86/events/zhaoxin/uncore.h @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Zhaoxin PMU; like Intel Architectural PerfMon-v2 + */ +#include <linux/slab.h> +#include <linux/pci.h> +#include <asm/apicdef.h> +#include <linux/io-64-nonatomic-lo-hi.h> + +#include <linux/perf_event.h> +#include "../perf_event.h" + +#define ZHAOXIN_FAM7_ZXD        0x1b +#define ZHAOXIN_FAM7_ZXE        0x3b + +#define UNCORE_PMU_NAME_LEN        32 +#define UNCORE_PMU_HRTIMER_INTERVAL    (60LL * NSEC_PER_SEC) +#define UNCORE_CHX_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC) + + +#define UNCORE_FIXED_EVENT              0xff +#define UNCORE_PMC_IDX_MAX_GENERIC      4 +#define UNCORE_PMC_IDX_MAX_FIXED        1 +#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC + +#define UNCORE_PMC_IDX_MAX              (UNCORE_PMC_IDX_FIXED + 1) + +struct zhaoxin_uncore_ops; +struct zhaoxin_uncore_pmu; +struct zhaoxin_uncore_box; +struct uncore_event_desc; + +struct zhaoxin_uncore_type { +    const char *name; +    int num_counters; +    int num_boxes; +    int perf_ctr_bits; +    int fixed_ctr_bits; +    unsigned int perf_ctr; +    
unsigned int event_ctl; +    unsigned int event_mask; +    unsigned int event_mask_ext; +    unsigned int fixed_ctr; +    unsigned int fixed_ctl; +    unsigned int box_ctl; +    unsigned int msr_offset; +    unsigned int num_shared_regs:8; +    unsigned int single_fixed:1; +    unsigned int pair_ctr_ctl:1; +    unsigned int *msr_offsets; +    struct event_constraint unconstrainted; +    struct event_constraint *constraints; +    struct zhaoxin_uncore_pmu *pmus; +    struct zhaoxin_uncore_ops *ops; +    struct uncore_event_desc *event_descs; +    const struct attribute_group *attr_groups[4]; +    struct pmu *pmu; /* for custom pmu ops */ +}; + +#define pmu_group attr_groups[0] +#define format_group attr_groups[1] +#define events_group attr_groups[2] + +struct zhaoxin_uncore_ops { +    void (*init_box)(struct zhaoxin_uncore_box *); +    void (*exit_box)(struct zhaoxin_uncore_box *); +    void (*disable_box)(struct zhaoxin_uncore_box *); +    void (*enable_box)(struct zhaoxin_uncore_box *); +    void (*disable_event)(struct zhaoxin_uncore_box *, struct perf_event *); +    void (*enable_event)(struct zhaoxin_uncore_box *, struct perf_event *); +    u64 (*read_counter)(struct zhaoxin_uncore_box *, struct perf_event *); +    int (*hw_config)(struct zhaoxin_uncore_box *, struct perf_event *); +    struct event_constraint *(*get_constraint)(struct zhaoxin_uncore_box *, +                           struct perf_event *); +    void (*put_constraint)(struct zhaoxin_uncore_box *, struct perf_event *); +}; + +struct zhaoxin_uncore_pmu { +    struct pmu            pmu; +    char                name[UNCORE_PMU_NAME_LEN]; +    int                pmu_idx; +    int                func_id; +    bool                registered; +    atomic_t            activeboxes; +    struct zhaoxin_uncore_type    *type; +    struct zhaoxin_uncore_box    **boxes; +}; + +struct zhaoxin_uncore_extra_reg { +    raw_spinlock_t lock; +    u64 config, config1, config2; +    atomic_t ref; +}; + +struct 
zhaoxin_uncore_box { +    int pci_phys_id; +    int package_id;    /*Package ID */ +    int n_active;    /* number of active events */ +    int n_events; +    int cpu;    /* cpu to collect events */ +    unsigned long flags; +    atomic_t refcnt; +    struct perf_event *events[UNCORE_PMC_IDX_MAX]; +    struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; +    struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX]; +    unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; +    u64 tags[UNCORE_PMC_IDX_MAX]; +    struct pci_dev *pci_dev; +    struct zhaoxin_uncore_pmu *pmu; +    u64 hrtimer_duration; /* hrtimer timeout for this box */ +    struct hrtimer hrtimer; +    struct list_head list; +    struct list_head active_list; +    void __iomem *io_addr; +    struct zhaoxin_uncore_extra_reg shared_regs[0]; +}; + +#define UNCORE_BOX_FLAG_INITIATED    0 + +struct uncore_event_desc { +    struct device_attribute attr; +    const char *config; +}; + +ssize_t zx_uncore_event_show(struct device *dev, +            struct device_attribute *attr, char *buf); + +#define ZHAOXIN_UNCORE_EVENT_DESC(_name, _config)            \ +{                                \ +    .attr    = __ATTR(_name, 0444, zx_uncore_event_show, NULL),    \ +    .config    = _config,                    \ +} + +#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)     \ +static ssize_t __uncore_##_var##_show(struct device *dev, \ +                struct device_attribute *attr,        \ +                char *page)                \ +{                                    \ +    BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);            \ +    return sprintf(page, _format "\n");                \ +}                                    \ +static struct device_attribute format_attr_##_var =            \ +    __ATTR(_name, 0444, __uncore_##_var##_show, NULL) + +static inline bool uncore_pmc_fixed(int idx) +{ +    return idx == UNCORE_PMC_IDX_FIXED; +} + +static inline unsigned int 
uncore_msr_box_offset(struct zhaoxin_uncore_box *box) +{ +    struct zhaoxin_uncore_pmu *pmu = box->pmu; + +    return pmu->type->msr_offsets ? +        pmu->type->msr_offsets[pmu->pmu_idx] : +        pmu->type->msr_offset * pmu->pmu_idx; +} + +static inline unsigned int uncore_msr_box_ctl(struct zhaoxin_uncore_box *box) +{ +    if (!box->pmu->type->box_ctl) +        return 0; +    return box->pmu->type->box_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctl(struct zhaoxin_uncore_box *box) +{ +    if (!box->pmu->type->fixed_ctl) +        return 0; +    return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctr(struct zhaoxin_uncore_box *box) +{ +    return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ +    return box->pmu->type->event_ctl + +        (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + +        uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ +    return box->pmu->type->perf_ctr + +        (box->pmu->type->pair_ctr_ctl ? 
2 * idx : idx) + +        uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box) +{ +    return uncore_msr_fixed_ctl(box); +} + +static inline +unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box) +{ +    return uncore_msr_fixed_ctr(box); +} + +static inline +unsigned int uncore_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ +    return uncore_msr_event_ctl(box, idx); +} + +static inline +unsigned int uncore_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ +    return uncore_msr_perf_ctr(box, idx); +} + +static inline int uncore_perf_ctr_bits(struct zhaoxin_uncore_box *box) +{ +    return box->pmu->type->perf_ctr_bits; +} + +static inline int uncore_fixed_ctr_bits(struct zhaoxin_uncore_box *box) +{ +    return box->pmu->type->fixed_ctr_bits; +} + +static inline int uncore_num_counters(struct zhaoxin_uncore_box *box) +{ +    return box->pmu->type->num_counters; +} + +static inline void uncore_disable_box(struct zhaoxin_uncore_box *box) +{ +    if (box->pmu->type->ops->disable_box) +        box->pmu->type->ops->disable_box(box); +} + +static inline void uncore_enable_box(struct zhaoxin_uncore_box *box) +{ +    if (box->pmu->type->ops->enable_box) +        box->pmu->type->ops->enable_box(box); +} + +static inline void uncore_disable_event(struct zhaoxin_uncore_box *box, +                struct perf_event *event) +{ +    box->pmu->type->ops->disable_event(box, event); +} + +static inline void uncore_enable_event(struct zhaoxin_uncore_box *box, +                struct perf_event *event) +{ +    box->pmu->type->ops->enable_event(box, event); +} + +static inline u64 uncore_read_counter(struct zhaoxin_uncore_box *box, +                struct perf_event *event) +{ +    return box->pmu->type->ops->read_counter(box, event); +} + +static inline void uncore_box_init(struct zhaoxin_uncore_box *box) +{ +    if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { +        if 
(box->pmu->type->ops->init_box) +            box->pmu->type->ops->init_box(box); +    } +} + +static inline void uncore_box_exit(struct zhaoxin_uncore_box *box) +{ +    if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { +        if (box->pmu->type->ops->exit_box) +            box->pmu->type->ops->exit_box(box); +    } +} + +static inline bool uncore_box_is_fake(struct zhaoxin_uncore_box *box) +{ +    return (box->package_id < 0); +} + +static inline struct zhaoxin_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) +{ +    return container_of(event->pmu, struct zhaoxin_uncore_pmu, pmu); +} + +static inline struct zhaoxin_uncore_box *uncore_event_to_box(struct perf_event *event) +{ +    return event->pmu_private; +} + + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu); +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event); + +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_event_start(struct perf_event *event, int flags); +static void uncore_pmu_event_stop(struct perf_event *event, int flags); +static int uncore_pmu_event_add(struct perf_event *event, int flags); +static void uncore_pmu_event_del(struct perf_event *event, int flags); +static void uncore_pmu_event_read(struct perf_event *event); +static void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event); +struct event_constraint * +uncore_get_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); +void uncore_put_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); +u64 uncore_shared_reg_config(struct zhaoxin_uncore_box *box, int idx); + +void chx_uncore_cpu_init(void); -- 2.20.1
1 0
0 0
[PATCH OLK-5.10] iommu/vt-d: Add support for detecting ACPI device in RMRR
by LeoLiuoc 09 Jul '21

09 Jul '21
Some ACPI devices need to issue dma requests to access the reserved memory area.BIOS uses the device scope type ACPI_NAMESPACE_DEVICE in RMRR to report these ACPI devices. This patch add support for detecting ACPI devices in RMRR and in order to distinguish it from PCI device, some interface functions are modified. Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com> ---  drivers/iommu/intel/dmar.c  | 77 +++++++++++++++++++--------------  drivers/iommu/intel/iommu.c | 86 ++++++++++++++++++++++++++++++++++---  drivers/iommu/iommu.c       |  6 +++  include/linux/dmar.h        | 11 ++++-  include/linux/iommu.h       |  3 ++  5 files changed, 142 insertions(+), 41 deletions(-) diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index b8d0b56a7575..1d705589fe21 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -215,7 +215,7 @@ static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,  }  /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */ -int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, +int dmar_pci_insert_dev_scope(struct dmar_pci_notify_info *info,                void *start, void*end, u16 segment,                struct dmar_dev_scope *devices,                int devices_cnt) @@ -304,7 +304,7 @@ static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)          drhd = container_of(dmaru->hdr,                      struct acpi_dmar_hardware_unit, header); -        ret = dmar_insert_dev_scope(info, (void *)(drhd + 1), +        ret = dmar_pci_insert_dev_scope(info, (void *)(drhd + 1),                  ((void *)drhd) + drhd->header.length,                  dmaru->segment,                  dmaru->devices, dmaru->devices_cnt); @@ -719,47 +719,58 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)      return dmaru;  } -static void __init dmar_acpi_insert_dev_scope(u8 device_number, -                          struct acpi_device *adev) +/* Return: > 0 if match found, 0 if 
no match found */ +bool dmar_acpi_insert_dev_scope(u8 device_number, +                struct acpi_device *adev, +                void *start, void *end, +                struct dmar_dev_scope *devices, +                int devices_cnt)  { -    struct dmar_drhd_unit *dmaru; -    struct acpi_dmar_hardware_unit *drhd;      struct acpi_dmar_device_scope *scope;      struct device *tmp;      int i;      struct acpi_dmar_pci_path *path; +    for (; start < end; start += scope->length) { +        scope = start; +        if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE) +            continue; +        if (scope->enumeration_id != device_number) +            continue; +        path = (void *)(scope + 1); +        for_each_dev_scope(devices, devices_cnt, i, tmp) +            if (tmp == NULL) { +                devices[i].bus = scope->bus; +                devices[i].devfn = PCI_DEVFN(path->device, path->function); +                rcu_assign_pointer(devices[i].dev, +                            get_device(&adev->dev)); +                return true; +            } +        WARN_ON(i >= devices_cnt); +    } +    return false; +} + +static int dmar_acpi_bus_add_dev(u8 device_number, struct acpi_device *adev) +{ +    struct dmar_drhd_unit *dmaru; +    struct acpi_dmar_hardware_unit *drhd; +    int ret; +      for_each_drhd_unit(dmaru) {          drhd = container_of(dmaru->hdr,                      struct acpi_dmar_hardware_unit,                      header); - -        for (scope = (void *)(drhd + 1); -             (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length; -             scope = ((void *)scope) + scope->length) { -            if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE) -                continue; -            if (scope->enumeration_id != device_number) -                continue; - -            path = (void *)(scope + 1); -            pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n", -                
dev_name(&adev->dev), dmaru->reg_base_addr, -                scope->bus, path->device, path->function); -            for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp) -                if (tmp == NULL) { -                    dmaru->devices[i].bus = scope->bus; -                    dmaru->devices[i].devfn = PCI_DEVFN(path->device, -                                        path->function); -                    rcu_assign_pointer(dmaru->devices[i].dev, -                               get_device(&adev->dev)); -                    return; -                } -            BUG_ON(i >= dmaru->devices_cnt); -        } +        ret = dmar_acpi_insert_dev_scope(device_number, adev, (void *)(drhd+1), +                        ((void *)drhd)+drhd->header.length, +                        dmaru->devices, dmaru->devices_cnt); +        if (ret) +            break;      } -    pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n", -        device_number, dev_name(&adev->dev)); +    if (ret > 0) +        ret = dmar_rmrr_add_acpi_dev(device_number, adev); + +    return ret;  }  static int __init dmar_acpi_dev_scope_init(void) @@ -788,7 +799,7 @@ static int __init dmar_acpi_dev_scope_init(void)                         andd->device_name);                  continue;              } -            dmar_acpi_insert_dev_scope(andd->device_number, adev); +            dmar_acpi_bus_add_dev(andd->device_number, adev);          }      }      return 0; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 6cc6ef585aa4..5f2b7a64d2c7 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4600,6 +4600,25 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)      return ret;  } +int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev) +{ +    int ret; +    struct dmar_rmrr_unit *rmrru; +    struct acpi_dmar_reserved_memory *rmrr; + +    list_for_each_entry(rmrru, &dmar_rmrr_units, list) { +        rmrr = 
container_of(rmrru->hdr, +                struct acpi_dmar_reserved_memory, +                header); +        ret = dmar_acpi_insert_dev_scope(device_number, adev, (void *)(rmrr + 1), +                        ((void *)rmrr) + rmrr->header.length, +                        rmrru->devices, rmrru->devices_cnt); +        if (ret) +            break; +    } +    return 0; +} +  int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)  {      int ret; @@ -4615,7 +4634,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)          rmrr = container_of(rmrru->hdr,                      struct acpi_dmar_reserved_memory, header);          if (info->event == BUS_NOTIFY_ADD_DEVICE) { -            ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1), +            ret = dmar_pci_insert_dev_scope(info, (void *)(rmrr + 1),                  ((void *)rmrr) + rmrr->header.length,                  rmrr->segment, rmrru->devices,                  rmrru->devices_cnt); @@ -4633,7 +4652,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)          atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);          if (info->event == BUS_NOTIFY_ADD_DEVICE) { -            ret = dmar_insert_dev_scope(info, (void *)(atsr + 1), +            ret = dmar_pci_insert_dev_scope(info, (void *)(atsr + 1),                      (void *)atsr + atsr->header.length,                      atsr->segment, atsru->devices,                      atsru->devices_cnt); @@ -4872,6 +4891,22 @@ static int __init platform_optin_force_iommu(void)      return 1;  } +static int acpi_device_create_direct_mappings(struct device *pn_dev, struct device *acpi_device) +{ +    struct iommu_group *group; + +    acpi_device->bus->iommu_ops = &intel_iommu_ops; +    group = iommu_group_get(pn_dev); +    if (!group) { +        pr_warn("ACPI name space devices create direct mappings wrong!\n"); +        return -EINVAL; +    } +    printk(KERN_INFO "pn_dev:%s enter to %s\n", 
dev_name(pn_dev), __func__); +    __acpi_device_create_direct_mappings(group, acpi_device); + +    return 0; +} +  static int __init probe_acpi_namespace_devices(void)  {      struct dmar_drhd_unit *drhd; @@ -4879,6 +4914,7 @@ static int __init probe_acpi_namespace_devices(void)      struct intel_iommu *iommu __maybe_unused;      struct device *dev;      int i, ret = 0; +    u8 bus, devfn;      for_each_active_iommu(iommu, drhd) {          for_each_active_dev_scope(drhd->devices, @@ -4887,6 +4923,8 @@ static int __init probe_acpi_namespace_devices(void)              struct iommu_group *group;              struct acpi_device *adev; +            struct device *pn_dev = NULL; +            struct device_domain_info *info = NULL;              if (dev->bus != &acpi_bus_type)                  continue; @@ -4896,19 +4934,53 @@ static int __init probe_acpi_namespace_devices(void)                          &adev->physical_node_list, node) {                  group = iommu_group_get(pn->dev);                  if (group) { +                    pn_dev = pn->dev;                      iommu_group_put(group);                      continue;                  } -                pn->dev->bus->iommu_ops = &intel_iommu_ops; -                ret = iommu_probe_device(pn->dev); -                if (ret) -                    break; +                iommu = device_to_iommu(dev, &bus, &devfn); +                if (!iommu) +                    return -ENODEV; +                info = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn); +                if (!info) { +                    pn->dev->bus->iommu_ops = &intel_iommu_ops; +                    ret = iommu_probe_device(pn->dev); +                    if (ret) { +                        pr_err("pn->dev:%s probe fail! 
ret:%d\n", +                            dev_name(pn->dev), ret); +                        goto unlock; +                    } +                } +                pn_dev = pn->dev; +            } +            if (!pn_dev) { +                iommu = device_to_iommu(dev, &bus, &devfn); +                if (!iommu) +                    return -ENODEV; +                info = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn); +                if (!info) { +                    dev->bus->iommu_ops = &intel_iommu_ops; +                    ret = iommu_probe_device(dev); +                    if (ret) { +                        pr_err("dev:%s probe fail! ret:%d\n", +                            dev_name(dev), ret); +                        goto unlock; +                    } +                    goto unlock; +                }              } +            if (!info) +                ret = acpi_device_create_direct_mappings(pn_dev, dev); +            else +                ret = acpi_device_create_direct_mappings(info->dev, dev); +unlock:              mutex_unlock(&adev->physical_node_lock); -            if (ret) +            if (ret) { +                pr_err("%s fail! 
ret:%d\n", __func__, ret);                  return ret; +            }          }      } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 86e3dbdfb7bd..6212eb1856f5 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -814,6 +814,12 @@ static bool iommu_is_attach_deferred(struct iommu_domain *domain,      return false;  } +void  __acpi_device_create_direct_mappings(struct iommu_group *group, struct device *acpi_device) +{ +    iommu_create_device_direct_mappings(group, acpi_device); +} +EXPORT_SYMBOL_GPL(__acpi_device_create_direct_mappings); +  /**   * iommu_group_add_device - add a device to an iommu group   * @group: the group into which to add the device (reference should be held) diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 65565820328a..248e3c2feeae 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -113,10 +113,13 @@ extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,                  struct dmar_dev_scope **devices, u16 segment);  extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);  extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt); -extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, +extern int dmar_pci_insert_dev_scope(struct dmar_pci_notify_info *info,                   void *start, void*end, u16 segment,                   struct dmar_dev_scope *devices,                   int devices_cnt); +extern bool dmar_acpi_insert_dev_scope(u8 device_number, +                struct acpi_device *adev, void *start, void *end, +                struct dmar_dev_scope *devices, int devices_cnt);  extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,                   u16 segment, struct dmar_dev_scope *devices,                   int count); @@ -140,6 +143,7 @@ extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);  extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);  extern int 
dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);  extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); +extern int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev);  extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);  #else /* !CONFIG_INTEL_IOMMU: */  static inline int intel_iommu_init(void) { return -ENODEV; } @@ -150,6 +154,11 @@ static inline void intel_iommu_shutdown(void) { }  #define    dmar_check_one_atsr        dmar_res_noop  #define    dmar_release_one_atsr        dmar_res_noop +static inline int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev) +{ +    return 0; +} +  static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)  {      return 0; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 3ff424d4f481..66ae2b7d65de 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -546,6 +546,9 @@ extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)  extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,                    unsigned long iova, int flags); +extern void __acpi_device_create_direct_mappings(struct iommu_group *group, +                        struct device *acpi_device); +  static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)  {      if (domain->ops->flush_iotlb_all) -- 2.20.1
1 0
0 0
[PATCH OLK-5.10] USB: Fix kernel NULL pointer when unbinding UHCI from vfio-pci
by LeoLiuoc 09 Jul '21

09 Jul '21
This bug is found in Zhaoxin platform, but it's a commom code bug. Fail sequence: step1: Unbind UHCI controller from native driver; step2: Bind UHCI controller to vfio-pci, which will put UHCI controller in one vfio        group's device list and set UHCI's dev->driver_data to struct vfio-pci(for UHCI) step3: Unbind EHCI controller from native driver, will try to tell UHCI native driver        that "I'm removed by set companion_hcd->self.hs_companion to NULL. However,        companion_hcd get from UHCI's dev->driver_data that has modified by vfio-pci        already.So, the vfio-pci structure will be damaged! step4: Bind EHCI controller to vfio-pci driver, which will put EHCI controller in the        same vfio group as UHCI controller;      ... ... step5: Unbind UHCI controller from vfio-pci, which will delete UHCI from vfio group'        device list that has been damaged in step 3. So,delete operation can random        result into a NULL pointer dereference with the below stack dump. step6: Bind UHCI controller to native driver; step7: Unbind EHCI controller from vfio-pci, which will try to remove EHCI controller        from the vfio group; step8: Bind EHCI controller to native driver; [  929.114641] uhci_hcd 0000:00:10.0: remove, state 1 [ 929.114652] usb usb1: USB disconnect, device number 1 [ 929.114655] usb 1-1: USB disconnect, device number 2 [ 929.270313] usb 1-2: USB disconnect, device number 3 [ 929.318404] uhci_hcd 0000:00:10.0: USB bus 1 deregistered [ 929.343029] uhci_hcd 0000:00:10.1: remove, state 4 [  929.343045] usb usb3: USB disconnect, device number 1 [  929.343685] uhci_hcd 0000:00:10.1: USB bus 3 deregistered [  929.369087] ehci-pci 0000:00:10.7: remove, state 4 [  929.369102] usb usb4: USB disconnect, device number 1 [  929.370325] ehci-pci 0000:00:10.7: USB bus 4 deregistered [  932.398494] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000 [  932.398496] PGD 42a67d067 P4D 42a67d067 PUD 42a65f067 PMD 0 [  932.398502] 
Oops: 0002 [#2] SMP NOPTI [  932.398505] CPU: 2 PID: 7824 Comm: vfio_unbind.sh Tainted: P D  4.19.65-2020051917-rainos #1 [  932.398506] Hardware name: Shanghai Zhaoxin Semiconductor Co., Ltd. HX002EH/HX002EH,            BIOS HX002EH0_01_R480_R_200408 04/08/2020 [ 932.398513] RIP: 0010:vfio_device_put+0x31/0xa0 [vfio] [ 932.398515] Code: 89 e5 41 54 53 4c 8b 67 18 48 89 fb 49 8d 74 24 30 e8 e3 0e f3 de         84 c0 74 67 48 8b 53 20 48 8b 43 28 48 8b 7b 18 48 89 42 08 <48> 89 10         48 b8 00 01 00 00 00 00 ad de 48 89 43 20 48 b8 00 02 00 [  932.398516] RSP: 0018:ffffbbfd04cffc18 EFLAGS: 00010202 [ 932.398518] RAX: 0000000000000000 RBX: ffff92c7ea717880 RCX: 0000000000000000 [  932.398519] RDX: ffff92c7ea713620 RSI: ffff92c7ea713630 RDI: ffff92c7ea713600 [  932.398521] RBP: ffffbbfd04cffc28 R08: ffff92c7f02a8080 R09: ffff92c7efc03980 [ 932.398522] R10: ffffbbfd04cff9a8 R11: 0000000000000000 R12: ffff92c7ea713600 [  932.398523] R13: ffff92c7ed8bb0a8 R14: ffff92c7ea717880 R15: 0000000000000000 [  932.398525] FS: 00007f3031500740(0000) GS:ffff92c7f0280000(0000) knlGS:0000000000000000 [  932.398526] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [  932.398527] CR2: 0000000000000000 CR3: 0000000428626004 CR4: 0000000000160ee0 [  932.398528] Call Trace: [  932.398534]  vfio_del_group_dev+0xe8/0x2a0 [vfio] [ 932.398539]  ? __blocking_notifier_call_chain+0x52/0x60 [  932.398542]  ? do_wait_intr_irq+0x90/0x90 [  932.398546]  ? iommu_bus_notifier+0x75/0x100 [  932.398551] vfio_pci_remove+0x20/0xa0 [vfio_pci] [  932.398554] pci_device_remove+0x3e/0xc0 [  932.398557] device_release_driver_internal+0x17a/0x240 [  932.398560]  device_release_driver+0x12/0x20 [  932.398561] unbind_store+0xee/0x180 [  932.398564]  drv_attr_store+0x27/0x40 [  932.398567]  sysfs_kf_write+0x3c/0x50 [  932.398568] kernfs_fop_write+0x125/0x1a0 [  932.398572] __vfs_write+0x3a/0x190 [  932.398575]  ? apparmor_file_permission+0x1a/0x20 [  932.398577]  ? 
security_file_permission+0x3b/0xc0 [  932.398581]  ? _cond_resched+0x1a/0x50 [  932.398582] vfs_write+0xb8/0x1b0 [  932.398584]  ksys_write+0x5c/0xe0 [ 932.398586]  __x64_sys_write+0x1a/0x20 [  932.398589] do_syscall_64+0x5a/0x110 [  932.398592] entry_SYSCALL_64_after_hwframe+0x44/0xa9 Using virt-manager/qemu to boot guest os, we can see the same fail sequence! Fix this by determine whether the PCI Driver of the USB controller is a kernel native driver. If not, do not let it modify UHCI's dev->driver_data. Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com> ---  drivers/usb/core/hcd-pci.c | 10 ++++++++++  1 file changed, 10 insertions(+) diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index ec0d6c50610c..000ee7a6731f 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -49,6 +49,7 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,      struct pci_dev        *companion;      struct usb_hcd        *companion_hcd;      unsigned int        slot = PCI_SLOT(pdev->devfn); +    struct pci_driver    *drv;      /*       * Iterate through other PCI functions in the same slot. @@ -61,6 +62,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,                  PCI_SLOT(companion->devfn) != slot)              continue; +        drv = companion->driver; +        if (!drv) +            continue; + +        if (strncmp(drv->name, "uhci_hcd", sizeof("uhci_hcd") - 1) && +            strncmp(drv->name, "ooci_hcd", sizeof("uhci_hcd") - 1) && +            strncmp(drv->name, "ehci_hcd", sizeof("uhci_hcd") - 1)) +            continue; +          /*           * Companion device should be either UHCI,OHCI or EHCI host           * controller, otherwise skip. -- 2.20.1
1 0
0 0
[PATCH OLK-5.10] xhci: fix issue with resume from system Sx state
by LeoLiuoc 09 Jul '21

09 Jul '21
On Zhaoxin ZX-100 project, xHCI can't work normally after resume from system Sx state. To fix this issue, when resume from system sx state, reinitialize xHCI instead of restore. Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com> ---  drivers/usb/host/xhci-pci.c | 3 +++  1 file changed, 3 insertions(+) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 471cf3e96032..44dd77343cc1 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -307,6 +307,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)      if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)          xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; +    if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN && pdev->device == 0x9202) +        xhci->quirks |= XHCI_RESET_ON_RESUME; +      if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM ||           pdev->vendor == PCI_VENDOR_ID_CAVIUM) &&           pdev->device == 0x9026) -- 2.20.1
1 0
0 0
[PATCH OLK-5.10] uhci: Adjust the UHCI controller's bit value
by LeoLiuoc 09 Jul '21

09 Jul '21
Over Current condition is not standardized in the UHCI spec. Zhaoxin UHCI controllers report Over Current active off. Intel controllers report it active on, so we'll adjust the bit value. Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com> ---  drivers/usb/host/uhci-pci.c | 3 +++  1 file changed, 3 insertions(+) diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c index 9b88745d247f..d90c391d4899 100644 --- a/drivers/usb/host/uhci-pci.c +++ b/drivers/usb/host/uhci-pci.c @@ -134,6 +134,9 @@ static int uhci_pci_init(struct usb_hcd *hcd)      if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)          device_set_wakeup_capable(uhci_dev(uhci), true); +    if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN) +        uhci->oc_low = 1; +      /* Set up pointers to PCI-specific functions */      uhci->reset_hc = uhci_pci_reset_hc;      uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc; -- 2.20.1
1 0
0 0
[PATCH OLK-5.10 3/3] ALSA: hda: Add support of Zhaoxin NB HDAC codec
by LeoLiuoc 09 Jul '21

09 Jul '21
Add Zhaoxin NB HDAC codec support. Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com> ---  sound/pci/hda/patch_hdmi.c | 26 ++++++++++++++++++++++++++  1 file changed, 26 insertions(+) diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 6d2a4dfcfe43..f541aeb4ecca 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4249,6 +4249,20 @@ static int patch_via_hdmi(struct hda_codec *codec)      return patch_simple_hdmi(codec, VIAHDMI_CVT_NID, VIAHDMI_PIN_NID);  } +/* ZHAOXIN HDMI Implementation */ +static int patch_zx_hdmi(struct hda_codec *codec) +{ +    int err; + +    err = patch_generic_hdmi(codec); +    codec->no_sticky_stream = 1; + +    if (err) +        return err; + +    return 0; +} +  /*   * patch entries   */ @@ -4342,6 +4356,12 @@ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",    patch_via_hdmi),  HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP",    patch_via_hdmi),  HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi),  HDA_CODEC_ENTRY(0x11069f85, "VX11 HDMI/DP", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x11069f86, "CND001 HDMI/DP", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x11069f87, "CND001 HDMI/DP", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x11069f88, "CHX001 HDMI/DP",    patch_zx_hdmi), +HDA_CODEC_ENTRY(0x11069f89, "CHX001 HDMI/DP",    patch_zx_hdmi), +HDA_CODEC_ENTRY(0x11069f8a, "CHX002 HDMI/DP",    patch_zx_hdmi), +HDA_CODEC_ENTRY(0x11069f8b, "CHX002 HDMI/DP",    patch_zx_hdmi),  HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI", patch_i915_cpt_hdmi),  HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi),  HDA_CODEC_ENTRY(0x80862801, "Bearlake HDMI", patch_generic_hdmi), @@ -4369,6 +4389,12 @@ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI",    patch_generic_hdmi),  HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),  HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),  HDA_CODEC_ENTRY(0x808629fb, "Crestline HDMI", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x1d179f86, "CND001 
HDMI/DP", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x1d179f87, "CND001 HDMI/DP", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x1d179f88, "CHX001 HDMI/DP",    patch_zx_hdmi), +HDA_CODEC_ENTRY(0x1d179f89, "CHX001 HDMI/DP",    patch_zx_hdmi), +HDA_CODEC_ENTRY(0x1d179f8a, "CHX002 HDMI/DP",    patch_zx_hdmi), +HDA_CODEC_ENTRY(0x1d179f8b, "CHX002 HDMI/DP",    patch_zx_hdmi),  /* special ID for generic HDMI */  HDA_CODEC_ENTRY(HDA_CODEC_ID_GENERIC_HDMI, "Generic HDMI", patch_generic_hdmi),  {} /* terminator */ -- 2.20.1
1 0
0 0
[PATCH OLK-5.10 2/3] ALSA: hda: Add support of Zhaoxin NB HDAC
by LeoLiuoc 09 Jul '21

09 Jul '21
Add the new PCI ID 0x1d17 0x9141/0x9142/0x9144 Zhaoxin NB HDAC support. And add some special initialization for Zhaoxin NB HDAC. Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com> ---  sound/pci/hda/hda_controller.c | 17 ++++++++++-  sound/pci/hda/hda_controller.h |  2 ++  sound/pci/hda/hda_intel.c      | 53 +++++++++++++++++++++++++++++++++-  3 files changed, 70 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index b972d59eb1ec..d6de0b1fcb66 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1057,6 +1057,16 @@ void azx_stop_chip(struct azx *chip)  }  EXPORT_SYMBOL_GPL(azx_stop_chip); +static void azx_rirb_zxdelay(struct azx *chip, int enable) +{ +    if (chip->remap_diu_addr) { +        if (!enable) +            writel(0x0, (char *)chip->remap_diu_addr + 0x490a8); +        else +            writel(0x1000000, (char *)chip->remap_diu_addr + 0x490a8); +    } +} +  /*   * interrupt handler   */ @@ -1116,9 +1126,14 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)              azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);              active = true;              if (status & RIRB_INT_RESPONSE) { -                if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) +                if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) || +                    (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)) { +                    azx_rirb_zxdelay(chip, 1);                      udelay(80); +                }                  snd_hdac_bus_update_rirb(bus); +                if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY) +                    azx_rirb_zxdelay(chip, 0);              }          }      } while (active && ++repeat < 10); diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index 68f9668788ea..543b9f75cf42 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -45,6 +45,7 @@  #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)    /* 
CORBRP clears itself after reset */  #define AZX_DCAPS_NO_MSI64      (1 << 29)    /* Stick to 32-bit MSIs */  #define AZX_DCAPS_SEPARATE_STREAM_TAG    (1 << 30) /* capture and playback use separate stream tag */ +#define AZX_DCAPS_RIRB_PRE_DELAY  (1 << 31)  enum {      AZX_SNOOP_TYPE_NONE, @@ -147,6 +148,7 @@ struct azx {      /* GTS present */      unsigned int gts_present:1; +    void __iomem *remap_diu_addr;  #ifdef CONFIG_SND_HDA_DSP_LOADER      struct azx_dev saved_azx_dev; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 8ff7902ec6e6..5ac2e95fd5fa 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -241,7 +241,8 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"               "{VIA, VT8251},"               "{VIA, VT8237A},"               "{ULI, M5461}," -             "{ZX, ZhaoxinHDA}}"); +             "{ZX, ZhaoxinHDA}," +             "{ZX, ZhaoxinHDMI}}");  MODULE_DESCRIPTION("Intel HDA driver");  #if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO) @@ -273,6 +274,7 @@ enum {      AZX_DRIVER_CTHDA,      AZX_DRIVER_CMEDIA,      AZX_DRIVER_ZHAOXIN, +    AZX_DRIVER_ZXHDMI,      AZX_DRIVER_GENERIC,      AZX_NUM_DRIVERS, /* keep this as last entry */  }; @@ -390,6 +392,7 @@ static const char * const driver_short_names[] = {      [AZX_DRIVER_CTHDA] = "HDA Creative",      [AZX_DRIVER_CMEDIA] = "HDA C-Media",      [AZX_DRIVER_ZHAOXIN] = "HDA Zhaoxin", +    [AZX_DRIVER_ZXHDMI] = "HDA Zhaoxin GFX",      [AZX_DRIVER_GENERIC] = "HD-Audio Generic",  }; @@ -411,6 +414,29 @@ static void update_pci_byte(struct pci_dev *pci, unsigned int reg,      pci_write_config_byte(pci, reg, data);  } +static int azx_init_chip_zx(struct azx *chip) +{ +    struct snd_card *card = chip->card; +    unsigned int diu_reg; +    struct pci_dev *diu_pci = NULL; + +    diu_pci = pci_get_device(0x1d17, 0x3a03, NULL); +    if (!diu_pci) { +        dev_err(card->dev, "hda no chx001 device.\n"); +        return -ENXIO; +    } +    
pci_read_config_dword(diu_pci, PCI_BASE_ADDRESS_0, &diu_reg); +    chip->remap_diu_addr = ioremap(diu_reg, 0x50000); +    dev_info(card->dev, "hda %x %p\n", diu_reg, chip->remap_diu_addr); +    return 0; +} + +static void azx_free_chip_zx(struct azx *chip) +{ +    if (chip->remap_diu_addr) +        iounmap(chip->remap_diu_addr); +} +  static void azx_init_pci(struct azx *chip)  {      int snoop_type = azx_get_snoop_type(chip); @@ -1386,6 +1412,9 @@ static void azx_free(struct azx *chip)      hda->init_failed = 1; /* to be sure */      complete_all(&hda->probe_wait); +    if (chip->driver_type == AZX_DRIVER_ZXHDMI) +        azx_free_chip_zx(chip); +      if (use_vga_switcheroo(hda)) {          if (chip->disabled && hda->probe_continued)              snd_hda_unlock_devices(&chip->bus); @@ -1786,6 +1815,8 @@ static int default_bdl_pos_adj(struct azx *chip)      case AZX_DRIVER_ICH:      case AZX_DRIVER_PCH:          return 1; +    case AZX_DRIVER_ZXHDMI: +        return 128;      default:          return 32;      } @@ -1903,6 +1934,11 @@ static int azx_first_init(struct azx *chip)      }  #endif +    chip->remap_diu_addr = NULL; + +    if (chip->driver_type == AZX_DRIVER_ZXHDMI) +        azx_init_chip_zx(chip); +      err = pci_request_regions(pci, "ICH HD audio");      if (err < 0)          return err; @@ -2011,6 +2047,7 @@ static int azx_first_init(struct azx *chip)              chip->playback_streams = ATIHDMI_NUM_PLAYBACK;              chip->capture_streams = ATIHDMI_NUM_CAPTURE;              break; +        case AZX_DRIVER_ZXHDMI:          case AZX_DRIVER_GENERIC:          default:              chip->playback_streams = ICH6_NUM_PLAYBACK; @@ -2732,6 +2769,13 @@ static const struct pci_device_id azx_ids[] = {      { PCI_DEVICE(0x1106, 0x9170), .driver_data = AZX_DRIVER_GENERIC },      /* VIA GFX VT6122/VX11 */      { PCI_DEVICE(0x1106, 0x9140), .driver_data = AZX_DRIVER_GENERIC }, +    { PCI_DEVICE(0x1106, 0x9141), .driver_data = AZX_DRIVER_GENERIC  }, +    { 
PCI_DEVICE(0x1106, 0x9142), +      .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | +      AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY }, +    { PCI_DEVICE(0x1106, 0x9144), +      .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | +      AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY },      /* SIS966 */      { PCI_DEVICE(0x1039, 0x7502), .driver_data = AZX_DRIVER_SIS },      /* ULI M5461 */ @@ -2787,6 +2831,13 @@ static const struct pci_device_id azx_ids[] = {        .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },      /* Zhaoxin */      { PCI_DEVICE(0x1d17, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN }, +    { PCI_DEVICE(0x1d17, 0x9141), .driver_data = AZX_DRIVER_GENERIC  }, +    { PCI_DEVICE(0x1d17, 0x9142), +      .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | +      AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY }, +    { PCI_DEVICE(0x1d17, 0x9144), +      .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | +      AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY },      { 0, }  };  MODULE_DEVICE_TABLE(pci, azx_ids); -- 2.20.1
1 0
0 0
[PATCH OLK-5.10 1/3] ALSA: hda: Add Zhaoxin SB HDAC non snoop path support
by LeoLiuoc 09 Jul '21

09 Jul '21
Add Zhaoxin SB HDAC non snoop path support. Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com> ---  sound/pci/hda/hda_intel.c | 13 +++++++++++--  1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 4c8b281c3992..8ff7902ec6e6 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -240,8 +240,8 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"               "{ATI, RV770},"               "{VIA, VT8251},"               "{VIA, VT8237A}," -             "{SiS, SIS966}," -             "{ULI, M5461}}"); +             "{ULI, M5461}," +             "{ZX, ZhaoxinHDA}}");  MODULE_DESCRIPTION("Intel HDA driver");  #if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO) @@ -1744,6 +1744,15 @@ static void azx_check_snoop_available(struct azx *chip)              snoop = false;      } +    if (azx_get_snoop_type(chip) == AZX_SNOOP_TYPE_NONE && +        chip->driver_type == AZX_DRIVER_ZHAOXIN) { +        u8 val1; + +        pci_read_config_byte(chip->pci, 0x42, &val1); +        if (!(val1 & 0x80) && chip->pci->revision == 0x20) +            snoop = false; +    } +      if (chip->driver_caps & AZX_DCAPS_SNOOP_OFF)          snoop = false; -- 2.20.1
1 0
0 0
  • ← Newer
  • 1
  • ...
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • Older →

HyperKitty Powered by HyperKitty