Modalities: Text
Formats: parquet
Size: < 1K
Libraries: Datasets, pandas
Dataset Viewer (auto-converted to Parquet)

Columns:
id      string (9 characters)
text    string (16 to 864 characters)
title   string (1 unique value)
The preview contains rows 000000000 through 000000099. The text column stores each code body flattened onto a single line; two representative rows are reproduced here with their indentation restored:

000000003
balance = 0
for op in operations:
    balance += op
    if balance < 0:
        return True
return False

000000005
if not numbers:
    return []
result = []
for n in numbers[:-1]:
    result.append(n)
    result.append(delimeter)
result.append(numbers[-1])
return result

The remaining rows follow the same pattern: each text entry is the body of one HumanEval reference solution, and the full table can be browsed in the dataset viewer.
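
Because the card lists the parquet format and the Datasets/pandas libraries, the rows above can be loaded directly with the datasets library. A minimal sketch; the repository id is taken from the Reference link further down this card and is an assumption about where these rows are hosted:

from datasets import load_dataset

# Repository id follows the Reference link on this card; adjust it if the
# rows shown above live under a different repo name.
ds = load_dataset("embedding-benchmark/HumanEval")
print(ds)  # prints the available splits and their row counts

split = next(iter(ds))
print(ds[split].column_names)  # expected from the schema above: id, text, title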

HumanEvalRetrieval

An MTEB (Massive Text Embedding Benchmark) dataset

A code retrieval task based on 164 Python programming problems from HumanEval. Each query is a natural language description of a programming task (e.g., 'Check if in given list of numbers, are any two numbers closer to each other than given threshold'), and the corpus contains Python code implementations. The task is to retrieve the correct code snippet that solves the described problem. Queries are problem descriptions while the corpus contains Python function implementations with proper indentation and logic.
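
For concreteness, here is what one query/relevant-document pair looks like. The pairing below is inferred from the example query quoted above and the matching HumanEval solution body in the corpus; it is not spelled out explicitly on this card:

Query:
    "Check if in given list of numbers, are any two numbers closer to each other than given threshold"

Relevant corpus document (Python function body, row id 000000000 in the preview):
    for idx, elem in enumerate(numbers):
        for idx2, elem2 in enumerate(numbers):
            if idx != idx2:
                distance = abs(elem - elem2)
                if distance < threshold:
                    return True
    return False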

Task category: t2t (text to text)
Domains: Programming
Reference: https://huggingface.co/datasets/embedding-benchmark/HumanEval

Source datasets: embedding-benchmark/HumanEval (see the Reference link above)

How to evaluate on this task

You can evaluate an embedding model on this dataset using the following code:

import mteb

task = mteb.get_task("HumanEvalRetrieval")
evaluator = mteb.MTEB([task])

model = mteb.get_model(YOUR_MODEL)
evaluator.run(model)
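
For a concrete run, any embedding model supported by mteb.get_model can be plugged in; the checkpoint name below is only an illustrative choice, not something prescribed by this card:

import mteb

task = mteb.get_task("HumanEvalRetrieval")
evaluator = mteb.MTEB([task])

# Illustrative model choice; swap in your own embedding model here.
model = mteb.get_model("sentence-transformers/all-MiniLM-L6-v2")

# Retrieval scores (nDCG@10 and related metrics) are written as JSON
# under the chosen output folder.
results = evaluator.run(model, output_folder="results")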

To learn more about how to run models on MTEB tasks, check out the GitHub repository.

Citation

If you use this dataset, please cite the dataset as well as mteb, as this dataset likely includes additional processing as part of the MMTEB contribution.

@article{chen2021evaluating,
  archiveprefix = {arXiv},
  author = {Chen, Mark and Tworek, Jerry and Jun, Heewoo and Yuan, Qiming and Pinto, Henrique Ponde de Oliveira and Kaplan, Jared and Edwards, Harri and Burda, Yuri and Joseph, Nicholas and Brockman, Greg and Ray, Alex and Puri, Raul and Krueger, Gretchen and Petrov, Michael and Khlaaf, Heidy and Sastry, Girish and Mishkin, Pamela and Chan, Brooke and Gray, Scott and Ryder, Nick and Pavlov, Mikhail and Power, Alethea and Kaiser, Lukasz and Bavarian, Mohammad and Winter, Clemens and Tillet, Philippe and Such, Felipe Petroski and Cummings, Dave and Plappert, Matthias and Chantzis, Fotios and Barnes, Elizabeth and Herbert-Voss, Ariel and Guss, William Hebgen and Nichol, Alex and Paino, Alex and Tezak, Nikolas and Tang, Jie and Babuschkin, Igor and Balaji, Suchir and Jain, Shantanu and Saunders, William and Hesse, Christopher and Carr, Andrew N. and Leike, Jan and Achiam, Joshua and Misra, Vedant and Morikawa, Evan and Radford, Alec and Knight, Matthew and Brundage, Miles and Murati, Mira and Mayer, Katie and Welinder, Peter and McGrew, Bob and Amodei, Dario and McCandlish, Sam and Sutskever, Ilya and Zaremba, Wojciech},
  eprint = {2107.03374},
  primaryclass = {cs.LG},
  title = {Evaluating Large Language Models Trained on Code},
  year = {2021},
}

@article{enevoldsen2025mmtebmassivemultilingualtext,
  title={MMTEB: Massive Multilingual Text Embedding Benchmark},
  author={Kenneth Enevoldsen and Isaac Chung and Imene Kerboua and Márton Kardos and Ashwin Mathur and David Stap and Jay Gala and Wissam Siblini and Dominik Krzemiński and Genta Indra Winata and Saba Sturua and Saiteja Utpala and Mathieu Ciancone and Marion Schaeffer and Gabriel Sequeira and Diganta Misra and Shreeya Dhakal and Jonathan Rystrøm and Roman Solomatin and Ömer Çağatan and Akash Kundu and Martin Bernstorff and Shitao Xiao and Akshita Sukhlecha and Bhavish Pahwa and Rafał Poświata and Kranthi Kiran GV and Shawon Ashraf and Daniel Auras and Björn Plüster and Jan Philipp Harries and Loïc Magne and Isabelle Mohr and Mariya Hendriksen and Dawei Zhu and Hippolyte Gisserot-Boukhlef and Tom Aarsen and Jan Kostkan and Konrad Wojtasik and Taemin Lee and Marek Šuppa and Crystina Zhang and Roberta Rocca and Mohammed Hamdy and Andrianos Michail and John Yang and Manuel Faysse and Aleksei Vatolin and Nandan Thakur and Manan Dey and Dipam Vasani and Pranjal Chitale and Simone Tedeschi and Nguyen Tai and Artem Snegirev and Michael Günther and Mengzhou Xia and Weijia Shi and Xing Han Lù and Jordan Clive and Gayatri Krishnakumar and Anna Maksimova and Silvan Wehrli and Maria Tikhonova and Henil Panchal and Aleksandr Abramov and Malte Ostendorff and Zheng Liu and Simon Clematide and Lester James Miranda and Alena Fenogenova and Guangyu Song and Ruqiya Bin Safi and Wen-Ding Li and Alessia Borghini and Federico Cassano and Hongjin Su and Jimmy Lin and Howard Yen and Lasse Hansen and Sara Hooker and Chenghao Xiao and Vaibhav Adlakha and Orion Weller and Siva Reddy and Niklas Muennighoff},
  publisher = {arXiv},
  journal={arXiv preprint arXiv:2502.13595},
  year={2025},
  url={https://arxiv.org/abs/2502.13595},
  doi = {10.48550/arXiv.2502.13595},
}

@article{muennighoff2022mteb,
  author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Loïc and Reimers, Nils},
  title = {MTEB: Massive Text Embedding Benchmark},
  publisher = {arXiv},
  journal={arXiv preprint arXiv:2210.07316},
  year = {2022},
  url = {https://arxiv.org/abs/2210.07316},
  doi = {10.48550/ARXIV.2210.07316},
}

Dataset Statistics

The descriptive statistics for this task are listed below. They can also be obtained programmatically using:

import mteb

task = mteb.get_task("HumanEvalRetrieval")

desc_stats = task.metadata.descriptive_stats
{
    "test": {
        "num_samples": 316,
        "number_of_characters": 73983,
        "documents_text_statistics": {
            "total_text_length": 27965,
            "min_text_length": 11,
            "average_text_length": 176.99367088607596,
            "max_text_length": 854,
            "unique_texts": 158
        },
        "documents_image_statistics": null,
        "queries_text_statistics": {
            "total_text_length": 46018,
            "min_text_length": 23,
            "average_text_length": 291.253164556962,
            "max_text_length": 1200,
            "unique_texts": 158
        },
        "queries_image_statistics": null,
        "relevant_docs_statistics": {
            "num_relevant_docs": 158,
            "min_relevant_docs_per_query": 1,
            "average_relevant_docs_per_query": 1.0,
            "max_relevant_docs_per_query": 1,
            "unique_relevant_docs": 158
        },
        "top_ranked_statistics": null
    }
}
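
desc_stats is a plain nested dictionary matching the structure printed above, so individual fields can be read with ordinary dict access. A minimal sketch, assuming that structure:

# Assumes desc_stats has the "test" layout shown above.
test_stats = desc_stats["test"]
print(test_stats["num_samples"])  # 316
print(test_stats["queries_text_statistics"]["average_text_length"])  # ~291.25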

This dataset card was automatically generated using MTEB
