cat data | sort | uniq > new_data  # This works, but on a large file it can take hours before the result comes out.
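As an aside, the same hash idea fits in a Perl one-liner that keeps only the first occurrence of each line, with no sorting needed and the input order preserved (a minimal sketch; the file names are just examples):

perl -ne 'print unless $seen{$_}++' data > new_data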
Below is a small tool that does the job with a Perl script. The principle is simple: build a hash in which each line of the file is a key and the value counts how many times that line appears. The script follows:
#!/usr/bin/perl
# Author: CaoJiangfeng
# Date: 2011-09-28
# Version: 1.0

use warnings;
use strict;

my %hash;
my $script = $0;    # the script name

sub usage
{
    printf("Usage:\n");
    printf("perl $script <source_file> <dest_file>\n");
}

# If fewer than 2 arguments were given, print the usage and exit
if (@ARGV < 2) {
    usage();
    exit 0;
}

my $source_file = $ARGV[0];    # file to remove duplicate lines from
my $dest_file   = $ARGV[1];    # file to write the deduplicated lines to

open(FILE, '<', $source_file) or die "Cannot open file: $!\n";
open(SORTED, '>', $dest_file) or die "Cannot open file: $!\n";

while (defined(my $line = <FILE>))
{
    chomp($line);
    $hash{$line} += 1;
    # print "$line,$hash{$line}\n";
}

foreach my $k (keys %hash) {
    print SORTED "$k,$hash{$k}\n";    # print each line and its occurrence count to the destination file
}

close(FILE);
close(SORTED);
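To run it, assuming the script is saved as remove_dup.pl (the filename here is hypothetical):

perl remove_dup.pl data new_data

Each distinct line of data is written to new_data exactly once, followed by a comma and its occurrence count. Note that because the lines come out of a hash, the output order is arbitrary; if you only want the deduplicated lines without the counts, change the print statement to print SORTED "$k\n";.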